// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_common.h"
#include "ice_flow.h"

/* Describe properties of a protocol header field */
struct ice_flow_field_info {
        enum ice_flow_seg_hdr hdr;
        s16 off;        /* Offset from start of a protocol header, in bits */
        u16 size;       /* Size of fields in bits */
};

/* Initializer for one ice_flow_field_info entry; offset/size are given in
 * bytes and stored in bits.
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
        .hdr = _hdr, \
        .off = (_offset_bytes) * BITS_PER_BYTE, \
        .size = (_size_bytes) * BITS_PER_BYTE, \
}

/* Table containing properties of supported protocol header fields */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
        /* Ether */
        /* ICE_FLOW_FIELD_IDX_ETH_DA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
        /* ICE_FLOW_FIELD_IDX_ETH_SA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
        /* ICE_FLOW_FIELD_IDX_S_VLAN */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, sizeof(__be16)),
        /* ICE_FLOW_FIELD_IDX_C_VLAN */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, sizeof(__be16)),
        /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, sizeof(__be16)),
        /* IPv4 / IPv6 */
        /* ICE_FLOW_FIELD_IDX_IPV4_SA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
        /* ICE_FLOW_FIELD_IDX_IPV4_DA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
        /* ICE_FLOW_FIELD_IDX_IPV6_SA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
        /* ICE_FLOW_FIELD_IDX_IPV6_DA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
        /* Transport */
        /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
        /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
        /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
        /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
        /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
        /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
        /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1),
        /* ARP */
        /* ICE_FLOW_FIELD_IDX_ARP_SIP */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)),
        /* ICE_FLOW_FIELD_IDX_ARP_DIP */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, sizeof(struct in_addr)),
        /* ICE_FLOW_FIELD_IDX_ARP_SHA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
        /* ICE_FLOW_FIELD_IDX_ARP_DHA */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
        /* ICE_FLOW_FIELD_IDX_ARP_OP */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, sizeof(__be16)),
        /* ICMP */
        /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, 1),
        /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, 1),
        /* GRE */
        /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
                          sizeof_field(struct gre_full_hdr, key)),
};

/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
        0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
        0x0000077E, 0x00000000, 0x00000000, 0x00000000,
        0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
        0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
        0x0000077E, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header */
static const u32 ice_ptypes_ipv4_ofos[] = {
        0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
        0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
        0x0000000E, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header */
static const u32 ice_ptypes_ipv6_ofos[] = {
        0x00000000, 0x00000000, 0x77000000, 0x10002000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
        0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
        0x00000770, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
static const u32 ice_ipv4_ofos_no_l4[] = {
        0x10C00000, 0x04000800, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
        0x00000800, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
static const u32 ice_ipv4_il_no_l4[] = {
        0x60000000, 0x18043008, 0x80000002, 0x6010c021,
        0x00000008, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
static const u32 ice_ipv6_ofos_no_l4[] = {
        0x00000000, 0x00000000, 0x43000000, 0x10002000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
static const u32 ice_ipv6_il_no_l4[] = {
        0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
        0x00000430, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
        0x81000000, 0x20204040, 0x04000010, 0x80810102,
        0x00000040, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
        0x04000000, 0x80810102, 0x10000040, 0x02040408,
        0x00000102, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
        0x08000000, 0x01020204, 0x20000081, 0x04080810,
        0x00000204, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
        0x10000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
        0x00000000, 0x02040408, 0x40000102, 0x08101020,
        0x00000408, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x42108000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
        0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
        0x0000017E, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC header */
static const u32 ice_ptypes_mac_il[] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Manage parameters and info.
used during the creation of a flow profile */ 301 struct ice_flow_prof_params { 302 enum ice_block blk; 303 u16 entry_length; /* # of bytes formatted entry will require */ 304 u8 es_cnt; 305 struct ice_flow_prof *prof; 306 307 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0 308 * This will give us the direction flags. 309 */ 310 struct ice_fv_word es[ICE_MAX_FV_WORDS]; 311 DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX); 312 }; 313 314 #define ICE_FLOW_SEG_HDRS_L3_MASK \ 315 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP) 316 #define ICE_FLOW_SEG_HDRS_L4_MASK \ 317 (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \ 318 ICE_FLOW_SEG_HDR_SCTP) 319 320 /** 321 * ice_flow_val_hdrs - validates packet segments for valid protocol headers 322 * @segs: array of one or more packet segments that describe the flow 323 * @segs_cnt: number of packet segments provided 324 */ 325 static enum ice_status 326 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) 327 { 328 u8 i; 329 330 for (i = 0; i < segs_cnt; i++) { 331 /* Multiple L3 headers */ 332 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK && 333 !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK)) 334 return ICE_ERR_PARAM; 335 336 /* Multiple L4 headers */ 337 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK && 338 !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) 339 return ICE_ERR_PARAM; 340 } 341 342 return 0; 343 } 344 345 /* Sizes of fixed known protocol headers without header options */ 346 #define ICE_FLOW_PROT_HDR_SZ_MAC 14 347 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2) 348 #define ICE_FLOW_PROT_HDR_SZ_IPV4 20 349 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40 350 #define ICE_FLOW_PROT_HDR_SZ_ARP 28 351 #define ICE_FLOW_PROT_HDR_SZ_ICMP 8 352 #define ICE_FLOW_PROT_HDR_SZ_TCP 20 353 #define ICE_FLOW_PROT_HDR_SZ_UDP 8 354 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12 355 356 /** 357 * ice_flow_calc_seg_sz - calculates size of a packet 
segment based on headers 358 * @params: information about the flow to be processed 359 * @seg: index of packet segment whose header size is to be determined 360 */ 361 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg) 362 { 363 u16 sz; 364 365 /* L2 headers */ 366 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ? 367 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC; 368 369 /* L3 headers */ 370 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) 371 sz += ICE_FLOW_PROT_HDR_SZ_IPV4; 372 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6) 373 sz += ICE_FLOW_PROT_HDR_SZ_IPV6; 374 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP) 375 sz += ICE_FLOW_PROT_HDR_SZ_ARP; 376 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK) 377 /* An L3 header is required if L4 is specified */ 378 return 0; 379 380 /* L4 headers */ 381 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP) 382 sz += ICE_FLOW_PROT_HDR_SZ_ICMP; 383 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP) 384 sz += ICE_FLOW_PROT_HDR_SZ_TCP; 385 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP) 386 sz += ICE_FLOW_PROT_HDR_SZ_UDP; 387 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP) 388 sz += ICE_FLOW_PROT_HDR_SZ_SCTP; 389 390 return sz; 391 } 392 393 /** 394 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments 395 * @params: information about the flow to be processed 396 * 397 * This function identifies the packet types associated with the protocol 398 * headers being present in packet segments of the specified flow profile. 
 */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
        struct ice_flow_prof *prof;
        u8 i;

        /* Start with all packet types eligible; each requested header below
         * intersects the set with that header's supported ptype bitmap.
         */
        memset(params->ptypes, 0xff, sizeof(params->ptypes));

        prof = params->prof;

        for (i = 0; i < params->prof->segs_cnt; i++) {
                const unsigned long *src;
                u32 hdrs;

                hdrs = prof->segs[i].hdrs;

                /* segment 0 uses the Outer/First tables; later segments use
                 * the Innermost/Last tables
                 */
                if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
                        /* NOTE(review): casting u32[] tables to unsigned long *
                         * assumes the bitmap word layout matches on this
                         * architecture (little-endian) -- TODO confirm
                         */
                        src = !i ? (const unsigned long *)ice_ptypes_mac_ofos :
                                (const unsigned long *)ice_ptypes_mac_il;
                        bitmap_and(params->ptypes, params->ptypes, src,
                                   ICE_FLOW_PTYPE_MAX);
                }

                if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
                        src = (const unsigned long *)ice_ptypes_macvlan_il;
                        bitmap_and(params->ptypes, params->ptypes, src,
                                   ICE_FLOW_PTYPE_MAX);
                }

                if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
                        bitmap_and(params->ptypes, params->ptypes,
                                   (const unsigned long *)ice_ptypes_arp_of,
                                   ICE_FLOW_PTYPE_MAX);
                }
                /* IPv4/IPv6 with no L4 header selects the *_no_l4 tables */
                if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
                    !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
                        src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 :
                                (const unsigned long *)ice_ipv4_il_no_l4;
                        bitmap_and(params->ptypes, params->ptypes, src,
                                   ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
                        src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
                                (const unsigned long *)ice_ptypes_ipv4_il;
                        bitmap_and(params->ptypes, params->ptypes, src,
                                   ICE_FLOW_PTYPE_MAX);
                } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
                           !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
                        src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 :
                                (const unsigned long *)ice_ipv6_il_no_l4;
                        bitmap_and(params->ptypes, params->ptypes, src,
                                   ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
                        src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
                                (const unsigned long *)ice_ptypes_ipv6_il;
                        bitmap_and(params->ptypes, params->ptypes, src,
                                   ICE_FLOW_PTYPE_MAX);
                }

                if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
                        src = (const unsigned long *)ice_ptypes_udp_il;
                        bitmap_and(params->ptypes, params->ptypes, src,
                                   ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
                        bitmap_and(params->ptypes, params->ptypes,
                                   (const unsigned long *)ice_ptypes_tcp_il,
                                   ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
                        src = (const unsigned long *)ice_ptypes_sctp_il;
                        bitmap_and(params->ptypes, params->ptypes, src,
                                   ICE_FLOW_PTYPE_MAX);
                }

                if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
                        src = !i ? (const unsigned long *)ice_ptypes_icmp_of :
                                (const unsigned long *)ice_ptypes_icmp_il;
                        bitmap_and(params->ptypes, params->ptypes, src,
                                   ICE_FLOW_PTYPE_MAX);
                } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
                        /* GRE is only matched as an outermost header */
                        if (!i) {
                                src = (const unsigned long *)ice_ptypes_gre_of;
                                bitmap_and(params->ptypes, params->ptypes,
                                           src, ICE_FLOW_PTYPE_MAX);
                        }
                }
        }

        return 0;
}

/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 *
 * This function determines the protocol ID, offset, and size of the given
 * field. It then allocates one or more extraction sequence entries for the
 * given field, and fill the entries with protocol ID and offset information.
 */
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
                    u8 seg, enum ice_flow_field fld)
{
        enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
        u8 fv_words = hw->blk[params->blk].es.fvw;
        struct ice_flow_fld_info *flds;
        u16 cnt, ese_bits, i;
        u16 off;

        flds = params->prof->segs[seg].fields;

        /* Map the flow field to a hardware protocol ID; segment 0 selects
         * the outer variant, later segments the inner variant.
         */
        switch (fld) {
        case ICE_FLOW_FIELD_IDX_ETH_DA:
        case ICE_FLOW_FIELD_IDX_ETH_SA:
        case ICE_FLOW_FIELD_IDX_S_VLAN:
        case ICE_FLOW_FIELD_IDX_C_VLAN:
                prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
                break;
        case ICE_FLOW_FIELD_IDX_ETH_TYPE:
                prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
                break;
        case ICE_FLOW_FIELD_IDX_IPV4_SA:
        case ICE_FLOW_FIELD_IDX_IPV4_DA:
                prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
                break;
        case ICE_FLOW_FIELD_IDX_IPV6_SA:
        case ICE_FLOW_FIELD_IDX_IPV6_DA:
                prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
                break;
        case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
        case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
        case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
                prot_id = ICE_PROT_TCP_IL;
                break;
        case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
        case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
                prot_id = ICE_PROT_UDP_IL_OR_S;
                break;
        case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
        case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
                prot_id = ICE_PROT_SCTP_IL;
                break;
        case ICE_FLOW_FIELD_IDX_ARP_SIP:
        case ICE_FLOW_FIELD_IDX_ARP_DIP:
        case ICE_FLOW_FIELD_IDX_ARP_SHA:
        case ICE_FLOW_FIELD_IDX_ARP_DHA:
        case ICE_FLOW_FIELD_IDX_ARP_OP:
                prot_id = ICE_PROT_ARP_OF;
                break;
        case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
        case ICE_FLOW_FIELD_IDX_ICMP_CODE:
                /* ICMP type and code share the same extraction seq. entry */
                prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ?
                        ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
                break;
        case ICE_FLOW_FIELD_IDX_GRE_KEYID:
                prot_id = ICE_PROT_GRE_OF;
                break;
        default:
                return ICE_ERR_NOT_IMPL;
        }

        /* Each extraction sequence entry is a word in size, and extracts a
         * word-aligned offset from a protocol header.
         */
        ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;

        flds[fld].xtrct.prot_id = prot_id;
        /* off: byte offset rounded down to the extract-word boundary;
         * disp: residual bit displacement within that word
         */
        flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
                ICE_FLOW_FV_EXTRACT_SZ;
        flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
        flds[fld].xtrct.idx = params->es_cnt;

        /* Adjust the next field-entry index after accommodating the number of
         * entries this field consumes
         */
        cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
                           ese_bits);

        /* Fill in the extraction sequence entries needed for this field */
        off = flds[fld].xtrct.off;
        for (i = 0; i < cnt; i++) {
                u8 idx;

                /* Make sure the number of extraction sequence required
                 * does not exceed the block's capability
                 */
                if (params->es_cnt >= fv_words)
                        return ICE_ERR_MAX_LIMIT;

                /* some blocks require a reversed field vector layout */
                if (hw->blk[params->blk].es.reverse)
                        idx = fv_words - params->es_cnt - 1;
                else
                        idx = params->es_cnt;

                params->es[idx].prot_id = prot_id;
                params->es[idx].off = off;
                params->es_cnt++;

                off += ICE_FLOW_FV_EXTRACT_SZ;
        }

        return 0;
}

/**
 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose raw fields are to be extracted
 */
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
                     u8 seg)
{
        u16 fv_words;
        u16 hdrs_sz;
        u8 i;

        if (!params->prof->segs[seg].raws_cnt)
                return 0;

        if (params->prof->segs[seg].raws_cnt >
            ARRAY_SIZE(params->prof->segs[seg].raws))
                return ICE_ERR_MAX_LIMIT;

        /* Offsets within the segment headers are not supported */
        hdrs_sz = ice_flow_calc_seg_sz(params, seg);
        if (!hdrs_sz)
                return ICE_ERR_PARAM;

        fv_words = hw->blk[params->blk].es.fvw;

        for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
                struct ice_flow_seg_fld_raw *raw;
                u16 off, cnt, j;

                raw = &params->prof->segs[seg].raws[i];

                /* Storing extraction information */
                raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
                raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
                        ICE_FLOW_FV_EXTRACT_SZ;
                raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
                        BITS_PER_BYTE;
                raw->info.xtrct.idx = params->es_cnt;

                /* Determine the number of field vector entries this raw field
                 * consumes.
                 */
                cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
                                   (raw->info.src.last * BITS_PER_BYTE),
                                   (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
                off = raw->info.xtrct.off;
                for (j = 0; j < cnt; j++) {
                        u16 idx;

                        /* Make sure the number of extraction sequence required
                         * does not exceed the block's capability
                         */
                        if (params->es_cnt >= hw->blk[params->blk].es.count ||
                            params->es_cnt >= ICE_MAX_FV_WORDS)
                                return ICE_ERR_MAX_LIMIT;

                        /* some blocks require a reversed field vector layout */
                        if (hw->blk[params->blk].es.reverse)
                                idx = fv_words - params->es_cnt - 1;
                        else
                                idx = params->es_cnt;

                        params->es[idx].prot_id = raw->info.xtrct.prot_id;
                        params->es[idx].off = off;
                        params->es_cnt++;
                        off += ICE_FLOW_FV_EXTRACT_SZ;
                }
        }

        return 0;
}

/**
 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 *
 * This function iterates through all matched fields in the given segments,
 * and creates an extraction sequence for the fields.
 */
static enum ice_status
ice_flow_create_xtrct_seq(struct ice_hw *hw,
                          struct ice_flow_prof_params *params)
{
        struct ice_flow_prof *prof = params->prof;
        enum ice_status status = 0;
        u8 i;

        for (i = 0; i < prof->segs_cnt; i++) {
                u8 j;

                /* NOTE(review): the cast assumes the bit layout of the match
                 * word is compatible with unsigned long bitmap ops on this
                 * architecture -- TODO confirm
                 */
                for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
                                 ICE_FLOW_FIELD_IDX_MAX) {
                        status = ice_flow_xtract_fld(hw, params, i,
                                                     (enum ice_flow_field)j);
                        if (status)
                                return status;
                }

                /* Process raw matching bytes */
                status = ice_flow_xtract_raws(hw, params, i);
                if (status)
                        return status;
        }

        return status;
}

/**
 * ice_flow_proc_segs - process all packet segments associated with a profile
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 */
static enum ice_status
ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
{
        enum ice_status status;

        status = ice_flow_proc_seg_hdrs(params);
        if (status)
                return status;

        status = ice_flow_create_xtrct_seq(hw, params);
        if (status)
                return status;

        /* Only the FD and RSS blocks are supported here */
        switch (params->blk) {
        case ICE_BLK_FD:
        case ICE_BLK_RSS:
                status = 0;
                break;
        default:
                return ICE_ERR_NOT_IMPL;
        }

        return status;
}

#define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
#define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004

/**
 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
 */
static struct ice_flow_prof *
ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
                         enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
                         u8 segs_cnt, u16 vsi_handle, u32 conds)
{
        struct ice_flow_prof *p, *prof = NULL;

        mutex_lock(&hw->fl_profs_locks[blk]);
        list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
                if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
                    segs_cnt && segs_cnt == p->segs_cnt) {
                        u8 i;

                        /* Check for profile-VSI association if specified */
                        if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
                            ice_is_vsi_valid(hw, vsi_handle) &&
                            !test_bit(vsi_handle, p->vsis))
                                continue;

                        /* Protocol headers must be checked. Matched fields are
                         * checked if specified.
                         */
                        for (i = 0; i < segs_cnt; i++)
                                if (segs[i].hdrs != p->segs[i].hdrs ||
                                    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
                                     segs[i].match != p->segs[i].match))
                                        break;

                        /* A match is found if all segments are matched */
                        if (i == segs_cnt) {
                                prof = p;
                                break;
                        }
                }
        mutex_unlock(&hw->fl_profs_locks[blk]);

        return prof;
}

/**
 * ice_flow_find_prof_id - Look up a profile with given profile ID
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: unique ID to identify this flow profile
 */
static struct ice_flow_prof *
ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
        struct ice_flow_prof *p;

        list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
                if (p->id == prof_id)
                        return p;

        return NULL;
}

/**
 * ice_dealloc_flow_entry - Deallocate flow entry memory
 * @hw: pointer to the HW struct
 * @entry: flow entry to be removed
 */
static void
ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
{
        if (!entry)
                return;

        if (entry->entry)
                devm_kfree(ice_hw_to_dev(hw), entry->entry);

        devm_kfree(ice_hw_to_dev(hw), entry);
}

/**
 * ice_flow_rem_entry_sync - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry: flow entry to be removed
 */
static enum ice_status
ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
                        struct ice_flow_entry *entry)
{
        if (!entry)
                return ICE_ERR_BAD_PTR;

        list_del(&entry->l_entry);

        ice_dealloc_flow_entry(hw, entry);

        return 0;
}

/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @prof: stores the returned flow profile added
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
                       enum ice_flow_dir dir, u64 prof_id,
                       struct ice_flow_seg_info *segs, u8 segs_cnt,
                       struct ice_flow_prof **prof)
{
        struct ice_flow_prof_params *params;
        enum ice_status status;
        u8 i;

        if (!prof)
                return ICE_ERR_BAD_PTR;

        params = kzalloc(sizeof(*params), GFP_KERNEL);
        if (!params)
                return ICE_ERR_NO_MEMORY;

        params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
                                    GFP_KERNEL);
        if (!params->prof) {
                status = ICE_ERR_NO_MEMORY;
                goto free_params;
        }

        /* initialize extraction sequence to all invalid (0xff) */
        for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
                params->es[i].prot_id = ICE_PROT_INVALID;
                params->es[i].off = ICE_FV_OFFSET_INVAL;
        }

        params->blk = blk;
        params->prof->id = prof_id;
        params->prof->dir = dir;
        params->prof->segs_cnt = segs_cnt;

        /* Make a copy of the segments that need to be persistent in the flow
         * profile instance
         */
        for (i = 0; i < segs_cnt; i++)
                memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs));

        status = ice_flow_proc_segs(hw, params);
        if (status) {
                ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
                goto out;
        }

        /* Add a HW profile for this flow profile */
        status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
                              params->es);
        if (status) {
                ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
                goto out;
        }

        INIT_LIST_HEAD(&params->prof->entries);
        mutex_init(&params->prof->entries_lock);
        *prof = params->prof;

out:
        /* on failure, release the profile; params itself is always freed */
        if (status)
                devm_kfree(ice_hw_to_dev(hw), params->prof);
free_params:
        kfree(params);

        return status;
}

/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
                       struct ice_flow_prof *prof)
{
        enum ice_status status;

        /* Remove all remaining flow entries before removing the flow profile */
        if (!list_empty(&prof->entries)) {
                struct ice_flow_entry *e, *t;

                mutex_lock(&prof->entries_lock);

                list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
                        status = ice_flow_rem_entry_sync(hw, blk, e);
                        if (status)
                                break;
                }

                mutex_unlock(&prof->entries_lock);
        }

        /* Remove all hardware profiles associated with this flow profile */
        status = ice_rem_prof(hw, blk, prof->id);
        if (!status) {
                list_del(&prof->l_entry);
                mutex_destroy(&prof->entries_lock);
                devm_kfree(ice_hw_to_dev(hw), prof);
        }

        return status;
}

/**
 * ice_flow_assoc_prof - associate a VSI with a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile
 * @vsi_handle: software VSI handle
 *
 * Assumption: the caller has acquired the lock to the profile list
 * and the software VSI handle has been validated
 */
static enum ice_status
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
                    struct ice_flow_prof *prof, u16 vsi_handle)
{
        enum ice_status status = 0;

        if (!test_bit(vsi_handle, prof->vsis)) {
                status = ice_add_prof_id_flow(hw, blk,
                                              ice_get_hw_vsi_num(hw,
                                                                 vsi_handle),
                                              prof->id);
                if (!status)
                        set_bit(vsi_handle, prof->vsis);
                else
                        ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
                                  status);
        }

        return status;
}

/**
 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile
 * @vsi_handle: software VSI handle
 *
 * Assumption: the caller has acquired the lock to the profile list
 * and the software VSI handle has been validated
 */
static enum ice_status
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
                       struct ice_flow_prof *prof, u16 vsi_handle)
{
        enum ice_status status = 0;

        if (test_bit(vsi_handle, prof->vsis)) {
                status = ice_rem_prof_id_flow(hw, blk,
                                              ice_get_hw_vsi_num(hw,
                                                                 vsi_handle),
                                              prof->id);
                if (!status)
                        clear_bit(vsi_handle, prof->vsis);
                else
                        ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
                                  status);
        }

        return status;
}

/**
 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow
/**
 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @prof: stores the returned flow profile added
 *
 * Validates the inputs, then creates the profile under the per-block
 * profile-list lock and links it into hw->fl_profs[blk].
 *
 * Return: 0 on success; ICE_ERR_MAX_LIMIT, ICE_ERR_PARAM, ICE_ERR_BAD_PTR
 * for invalid arguments; otherwise the error from header validation or
 * ice_flow_add_prof_sync().
 */
enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
		  struct ice_flow_prof **prof)
{
	enum ice_status status;

	if (segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_MAX_LIMIT;

	if (!segs_cnt)
		return ICE_ERR_PARAM;

	if (!segs)
		return ICE_ERR_BAD_PTR;

	status = ice_flow_val_hdrs(segs, segs_cnt);
	if (status)
		return status;

	mutex_lock(&hw->fl_profs_locks[blk]);

	status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
					prof);
	if (!status)
		list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);

	mutex_unlock(&hw->fl_profs_locks[blk]);

	return status;
}

/**
 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
 * @hw: pointer to the HW struct
 * @blk: the block for which the flow profile is to be removed
 * @prof_id: unique ID of the flow profile to be removed
 *
 * Return: 0 on success, ICE_ERR_DOES_NOT_EXIST if no profile matches
 * @prof_id, otherwise the error from ice_flow_rem_prof_sync().
 */
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
	struct ice_flow_prof *prof;
	enum ice_status status;

	mutex_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto out;
	}

	/* prof becomes invalid after the call */
	status = ice_flow_rem_prof_sync(hw, blk, prof);

out:
	mutex_unlock(&hw->fl_profs_locks[blk]);

	return status;
}

/**
 * ice_flow_add_entry - Add a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: ID of the profile to add a new flow entry to
 * @entry_id: unique ID to identify this flow entry
 * @vsi_handle: software VSI handle for the flow entry
 * @prio: priority of the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @entry_h: pointer to buffer that receives the new flow entry's handle
 *
 * The profile lookup, entry allocation, and VSI association happen under the
 * per-block profile-list lock; the entry is then linked into the profile's
 * entry list under that profile's entries_lock.  Only the FD and RSS blocks
 * are implemented; any other block returns ICE_ERR_NOT_IMPL.
 *
 * Return: 0 on success and *@entry_h holds the new entry's handle;
 * ICE_ERR_BAD_PTR/ICE_ERR_PARAM for invalid inputs, ICE_ERR_DOES_NOT_EXIST
 * if the profile is not found, ICE_ERR_NO_MEMORY on allocation failure, or
 * the error from ice_flow_assoc_prof().  On failure the entry is freed.
 */
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, u64 *entry_h)
{
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
	} else {
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile
		 */
		e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
		if (!e)
			status = ICE_ERR_NO_MEMORY;
		else
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	}

	mutex_unlock(&hw->fl_profs_locks[blk]);
	if (status)
		goto out;

	e->id = entry_id;
	e->vsi_handle = vsi_handle;
	e->prof = prof;
	e->priority = prio;

	switch (blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		break;
	default:
		status = ICE_ERR_NOT_IMPL;
		goto out;
	}

	mutex_lock(&prof->entries_lock);
	list_add(&e->l_entry, &prof->entries);
	mutex_unlock(&prof->entries_lock);

	*entry_h = ICE_FLOW_ENTRY_HNDL(e);

out:
	if (status && e) {
		/* e->entry is never set on this path; the guard keeps the
		 * cleanup symmetric with ice_dealloc_flow_entry()
		 */
		if (e->entry)
			devm_kfree(ice_hw_to_dev(hw), e->entry);
		devm_kfree(ice_hw_to_dev(hw), e);
	}

	return status;
}

/**
 * ice_flow_rem_entry - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry_h: handle to the flow entry to be removed
 *
 * NOTE(review): @entry_h is converted straight back to a pointer via
 * ICE_FLOW_ENTRY_PTR with no validation beyond the INVAL check; callers
 * must only pass handles previously returned by ice_flow_add_entry().
 *
 * Return: 0 on success or if the entry has no owning profile;
 * ICE_ERR_PARAM for an invalid handle.
 */
enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
				   u64 entry_h)
{
	struct ice_flow_entry *entry;
	struct ice_flow_prof *prof;
	enum ice_status status = 0;

	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
		return ICE_ERR_PARAM;

	entry = ICE_FLOW_ENTRY_PTR(entry_h);

	/* Retain the pointer to the flow profile as the entry will be freed */
	prof = entry->prof;

	if (prof) {
		mutex_lock(&prof->entries_lock);
		status = ice_flow_rem_entry_sync(hw, blk, entry);
		mutex_unlock(&prof->entries_lock);
	}

	return status;
}
Some fields cannot be used alone to 1235 * determine the protocol headers present. Sometimes, fields for particular 1236 * protocol headers are not matched. In those cases, the protocol headers 1237 * must be explicitly set. 1238 */ 1239 static void 1240 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld, 1241 enum ice_flow_fld_match_type field_type, u16 val_loc, 1242 u16 mask_loc, u16 last_loc) 1243 { 1244 u64 bit = BIT_ULL(fld); 1245 1246 seg->match |= bit; 1247 if (field_type == ICE_FLOW_FLD_TYPE_RANGE) 1248 seg->range |= bit; 1249 1250 seg->fields[fld].type = field_type; 1251 seg->fields[fld].src.val = val_loc; 1252 seg->fields[fld].src.mask = mask_loc; 1253 seg->fields[fld].src.last = last_loc; 1254 1255 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr); 1256 } 1257 1258 /** 1259 * ice_flow_set_fld - specifies locations of field from entry's input buffer 1260 * @seg: packet segment the field being set belongs to 1261 * @fld: field to be set 1262 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from 1263 * entry's input buffer 1264 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's 1265 * input buffer 1266 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from 1267 * entry's input buffer 1268 * @range: indicate if field being matched is to be in a range 1269 * 1270 * This function specifies the locations, in the form of byte offsets from the 1271 * start of the input buffer for a flow entry, from where the value to match, 1272 * the mask value, and upper value can be extracted. These locations are then 1273 * stored in the flow profile. When adding a flow entry associated with the 1274 * flow profile, these locations will be used to quickly extract the values and 1275 * create the content of a match entry. This function should only be used for 1276 * fixed-size data structures. 
1277 */ 1278 void 1279 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, 1280 u16 val_loc, u16 mask_loc, u16 last_loc, bool range) 1281 { 1282 enum ice_flow_fld_match_type t = range ? 1283 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG; 1284 1285 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc); 1286 } 1287 1288 /** 1289 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf 1290 * @seg: packet segment the field being set belongs to 1291 * @off: offset of the raw field from the beginning of the segment in bytes 1292 * @len: length of the raw pattern to be matched 1293 * @val_loc: location of the value to match from entry's input buffer 1294 * @mask_loc: location of mask value from entry's input buffer 1295 * 1296 * This function specifies the offset of the raw field to be match from the 1297 * beginning of the specified packet segment, and the locations, in the form of 1298 * byte offsets from the start of the input buffer for a flow entry, from where 1299 * the value to match and the mask value to be extracted. These locations are 1300 * then stored in the flow profile. When adding flow entries to the associated 1301 * flow profile, these locations can be used to quickly extract the values to 1302 * create the content of a match entry. This function should only be used for 1303 * fixed-size data structures. 
1304 */ 1305 void 1306 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, 1307 u16 val_loc, u16 mask_loc) 1308 { 1309 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) { 1310 seg->raws[seg->raws_cnt].off = off; 1311 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE; 1312 seg->raws[seg->raws_cnt].info.src.val = val_loc; 1313 seg->raws[seg->raws_cnt].info.src.mask = mask_loc; 1314 /* The "last" field is used to store the length of the field */ 1315 seg->raws[seg->raws_cnt].info.src.last = len; 1316 } 1317 1318 /* Overflows of "raws" will be handled as an error condition later in 1319 * the flow when this information is processed. 1320 */ 1321 seg->raws_cnt++; 1322 } 1323 1324 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \ 1325 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN) 1326 1327 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \ 1328 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6) 1329 1330 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \ 1331 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP) 1332 1333 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \ 1334 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \ 1335 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \ 1336 ICE_FLOW_RSS_SEG_HDR_L4_MASKS) 1337 1338 /** 1339 * ice_flow_set_rss_seg_info - setup packet segments for RSS 1340 * @segs: pointer to the flow field segment(s) 1341 * @hash_fields: fields to be hashed on for the segment(s) 1342 * @flow_hdr: protocol header fields within a packet segment 1343 * 1344 * Helper function to extract fields from hash bitmap and use flow 1345 * header value to set flow field segment for further use in flow 1346 * profile entry or removal. 
/**
 * ice_flow_set_rss_seg_info - setup packet segments for RSS
 * @segs: pointer to the flow field segment(s)
 * @hash_fields: fields to be hashed on for the segment(s)
 * @flow_hdr: protocol header fields within a packet segment
 *
 * Helper function to extract fields from hash bitmap and use flow
 * header value to set flow field segment for further use in flow
 * profile entry or removal.
 *
 * Return: 0 on success; ICE_ERR_PARAM if the resulting headers include any
 * bit outside the supported L2/L3/L4 set; ICE_ERR_CFG if more than one L3
 * or more than one L4 protocol is selected at once.
 */
static enum ice_status
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
			  u32 flow_hdr)
{
	u64 val;
	u8 i;

	for_each_set_bit(i, (unsigned long *)&hash_fields,
			 ICE_FLOW_FIELD_IDX_MAX)
		ice_flow_set_fld(segs, (enum ice_flow_field)i,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	ICE_FLOW_SET_HDRS(segs, flow_hdr);

	if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
		return ICE_ERR_PARAM;

	/* exactly zero or one L3 header bit may be set */
	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
	if (val && !is_power_of_2(val))
		return ICE_ERR_CFG;

	/* exactly zero or one L4 header bit may be set */
	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
	if (val && !is_power_of_2(val))
		return ICE_ERR_CFG;

	return 0;
}

/**
 * ice_rem_vsi_rss_list - remove VSI from RSS list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * Remove the VSI from all RSS configurations in the list.  Configurations
 * left with no associated VSIs are unlinked and freed.
 *
 * NOTE(review): the initial list_empty() check runs before rss_locks is
 * taken — it is a best-effort early exit; verify this is acceptable for
 * all callers.
 */
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_rss_cfg *r, *tmp;

	if (list_empty(&hw->rss_list_head))
		return;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
		if (test_and_clear_bit(vsi_handle, r->vsis))
			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
				list_del(&r->l_entry);
				devm_kfree(ice_hw_to_dev(hw), r);
			}
	mutex_unlock(&hw->rss_locks);
}

/**
 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * This function will iterate through all flow profiles and disassociate
 * the VSI from that profile. If the flow profile has no VSIs it will
 * be removed.
 *
 * Return: 0 on success or if there are no RSS profiles; ICE_ERR_PARAM for
 * an invalid VSI handle; otherwise the first disassociation/removal error
 * (iteration stops at the first failure).
 */
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *p, *t;
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	if (list_empty(&hw->fl_profs[blk]))
		return 0;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
		if (test_bit(vsi_handle, p->vsis)) {
			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
			if (status)
				break;

			if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
				status = ice_flow_rem_prof(hw, blk, p->id);
				if (status)
					break;
			}
		}
	mutex_unlock(&hw->rss_locks);

	return status;
}

/**
 * ice_rem_rss_list - remove RSS configuration from list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @prof: pointer to flow profile
 *
 * Assumption: lock has already been acquired for RSS list
 */
static void
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
	struct ice_rss_cfg *r, *tmp;

	/* Search for RSS hash fields associated to the VSI that match the
	 * hash configurations associated to the flow profile. If found
	 * remove from the RSS entry list of the VSI context and delete entry.
	 */
	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
			clear_bit(vsi_handle, r->vsis);
			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
				list_del(&r->l_entry);
				devm_kfree(ice_hw_to_dev(hw), r);
			}
			return;
		}
}

/**
 * ice_add_rss_list - add RSS configuration to list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @prof: pointer to flow profile
 *
 * Assumption: lock has already been acquired for RSS list
 *
 * If a configuration matching the profile's last segment already exists,
 * only the VSI bit is set on it; otherwise a new entry is allocated and
 * appended.
 *
 * Return: 0 on success, ICE_ERR_NO_MEMORY on allocation failure.
 */
static enum ice_status
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
	struct ice_rss_cfg *r, *rss_cfg;

	list_for_each_entry(r, &hw->rss_list_head, l_entry)
		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
			set_bit(vsi_handle, r->vsis);
			return 0;
		}

	rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
			       GFP_KERNEL);
	if (!rss_cfg)
		return ICE_ERR_NO_MEMORY;

	rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
	rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
	set_bit(vsi_handle, rss_cfg->vsis);

	list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);

	return 0;
}

#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2
/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))

/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 * @segs_cnt: packet segment count
 *
 * Assumption: lock has already been acquired for RSS list
 *
 * The function proceeds in four stages, stopping at the first that applies:
 * 1) an identical profile (headers + hash fields) already holds this VSI:
 *    nothing to do;
 * 2) a profile with the same headers holds this VSI but with different hash
 *    fields: detach the VSI from it (removing the profile if it is left
 *    empty) and fall through;
 * 3) a profile with the same hash fields exists: associate the VSI to it;
 * 4) otherwise create a new profile with a generated ID and associate the
 *    VSI.
 *
 * Return: 0 on success; ICE_ERR_PARAM or ICE_ERR_NO_MEMORY for bad inputs
 * or allocation failure; otherwise the first error from segment setup,
 * profile add/remove, or VSI (dis)association.
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_PARAM;

	segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto exit;

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof)
		goto exit;

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			status = ice_add_rss_list(hw, vsi_handle, prof);
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(hashed_flds,
						       segs[segs_cnt - 1].hdrs,
						       segs_cnt),
				   segs, segs_cnt, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

exit:
	kfree(segs);
	return status;
}

/**
 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 *
 * This function will generate a flow profile based on fields associated with
 * the input fields to hash on, the flow type and use the VSI number to add
 * a flow entry to the profile.  Both the outer (single-segment) and inner
 * (two-segment) header configurations are applied under rss_locks.
 *
 * Return: 0 on success, ICE_ERR_PARAM for invalid inputs, otherwise the
 * first error from ice_add_rss_cfg_sync().
 */
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		u32 addl_hdrs)
{
	enum ice_status status;

	if (hashed_flds == ICE_HASH_INVALID ||
	    !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&hw->rss_locks);
	status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
				      ICE_RSS_OUTER_HEADERS);
	if (!status)
		status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
					      addl_hdrs, ICE_RSS_INNER_HEADERS);
	mutex_unlock(&hw->rss_locks);

	return status;
}

/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
 * As the ice_flow_avf_hdr_field represent individual bit shifts in a hash,
 * convert its values to their appropriate flow L3, L4 values.
 */
#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))

#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))

/**
 * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
 *
 * This function will take the hash bitmap provided by the AVF driver via a
 * message, convert it to ICE-compatible values, and configure RSS flow
 * profiles.
 *
 * The loop consumes groups of bits from @avf_hash each iteration (clearing
 * them from the working copy), translating each group into one
 * ice_add_rss_cfg() call; IPv4 groups are drained before IPv6 groups.
 *
 * Return: 0 on success; ICE_ERR_PARAM or ICE_ERR_CFG for invalid input;
 * ICE_ERR_OUT_OF_RANGE if remaining bits map to no known group; otherwise
 * the first error from ice_add_rss_cfg().
 */
enum ice_status
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
{
	enum ice_status status = 0;
	u64 hash_flds;

	if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
	    !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Make sure no unsupported bits are specified */
	if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
			 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
		return ICE_ERR_CFG;

	hash_flds = avf_hash;

	/* Always create an L3 RSS configuration for any L4 RSS configuration */
	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
		hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;

	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
		hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;

	/* Create the corresponding RSS configuration for each valid hash bit */
	while (hash_flds) {
		u64 rss_hash = ICE_HASH_INVALID;

		if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
			if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4;
				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_TCP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_UDP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
			} else if (hash_flds &
				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_SCTP_PORT;
				hash_flds &=
					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
			}
		} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
			if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6;
				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_TCP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_UDP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
			} else if (hash_flds &
				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_SCTP_PORT;
				hash_flds &=
					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
			}
		}

		if (rss_hash == ICE_HASH_INVALID)
			return ICE_ERR_OUT_OF_RANGE;

		status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
					 ICE_FLOW_SEG_HDR_NONE);
		if (status)
			break;
	}

	return status;
}

/**
 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * Re-applies every stored RSS configuration that includes @vsi_handle,
 * replaying both the outer and inner header variants under rss_locks.
 *
 * Return: 0 on success, ICE_ERR_PARAM for an invalid VSI handle, otherwise
 * the first error from ice_add_rss_cfg_sync().
 */
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status = 0;
	struct ice_rss_cfg *r;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry(r, &hw->rss_list_head, l_entry) {
		if (test_bit(vsi_handle, r->vsis)) {
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
						      r->hashed_flds,
						      r->packet_hdr,
						      ICE_RSS_OUTER_HEADERS);
			if (status)
				break;
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
						      r->hashed_flds,
						      r->packet_hdr,
						      ICE_RSS_INNER_HEADERS);
			if (status)
				break;
		}
	}
	mutex_unlock(&hw->rss_locks);

	return status;
}

/**
 * ice_get_rss_cfg - returns hashed fields for the given header types
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hdrs: protocol header type
 *
 * This function will return the match fields of the first instance of flow
 * profile having the given header types and containing input VSI
 *
 * Return: the hashed fields of the first matching RSS configuration, or
 * ICE_HASH_INVALID if @hdrs is ICE_FLOW_SEG_HDR_NONE, the VSI handle is
 * invalid, or no configuration matches.
 */
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
{
	u64 rss_hash = ICE_HASH_INVALID;
	struct ice_rss_cfg *r;

	/* verify if the protocol header is non zero and VSI is valid */
	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_HASH_INVALID;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry(r, &hw->rss_list_head, l_entry)
		if (test_bit(vsi_handle, r->vsis) &&
		    r->packet_hdr == hdrs) {
			rss_hash = r->hashed_flds;
			break;
		}
	mutex_unlock(&hw->rss_locks);

	return rss_hash;
}