// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_common.h"
#include "ice_flow.h"

/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
};

#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
}

/* Table containing properties of supported protocol header fields */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* IPv4 / IPv6 */
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
	/* Transport */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
			  sizeof_field(struct gre_full_hdr, key)),
};
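
/* For illustration: the ICE_FLOW_FIELD_IDX_IPV4_SA entry above,
 *	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr))
 * expands to { .hdr = ICE_FLOW_SEG_HDR_IPV4, .off = 96, .size = 32 }, i.e.
 * the IPv4 source address starts 12 bytes (96 bits) into the IPv4 header and
 * is 4 bytes (32 bits) wide.
 */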

/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Packet types for packets with an Outer/First/Single IPv4 header
 */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
static const u32 ice_ipv4_ofos_no_l4[] = {
	0x10C00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
static const u32 ice_ipv4_il_no_l4[] = {
	0x60000000, 0x18043008, 0x80000002, 0x6010c021,
	0x00000008, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
static const u32 ice_ipv6_ofos_no_l4[] = {
	0x00000000, 0x00000000, 0x43000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
static const u32 ice_ipv6_il_no_l4[] = {
	0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
	0x00000430, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk;
	u16 entry_length; /* # of bytes formatted entry will require */
	u8 es_cnt;
	struct ice_flow_prof *prof;

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];
	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
};

#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)

/**
 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 */
static enum ice_status
ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
{
	u8 i;

	for (i = 0; i < segs_cnt; i++) {
		/* Multiple L3 headers */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
			return ICE_ERR_PARAM;

		/* Multiple L4 headers */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
			return ICE_ERR_PARAM;
	}

	return 0;
}

/* Sizes of fixed known protocol headers without header options */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12

/**
 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose header size is to be determined
 */
static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
{
	u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC;

	/* L3 headers */
	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;

	/* L4 headers */
	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;

	return sz;
}
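
/* For example, a segment whose hdrs field contains ICE_FLOW_SEG_HDR_IPV4 |
 * ICE_FLOW_SEG_HDR_TCP is sized by the helper above as
 * 14 (MAC) + 20 (IPv4) + 20 (TCP) = 54 bytes; the result is consumed by
 * ice_flow_xtract_raws() below when raw matching fields are present.
 */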

/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers present in the packet segments of the specified flow profile.
 */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof;
	u8 i;

	memset(params->ptypes, 0xff, sizeof(params->ptypes));

	prof = params->prof;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		const unsigned long *src;
		u32 hdrs;

		hdrs = prof->segs[i].hdrs;

		if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
		    !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
			src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 :
				(const unsigned long *)ice_ipv4_il_no_l4;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
				(const unsigned long *)ice_ptypes_ipv4_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
			src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 :
				(const unsigned long *)ice_ipv6_il_no_l4;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
				(const unsigned long *)ice_ptypes_ipv6_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
			src = (const unsigned long *)ice_ptypes_udp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
			bitmap_and(params->ptypes, params->ptypes,
				   (const unsigned long *)ice_ptypes_tcp_il,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
			src = (const unsigned long *)ice_ptypes_sctp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
			if (!i) {
				src = (const unsigned long *)ice_ptypes_gre_of;
				bitmap_and(params->ptypes, params->ptypes,
					   src, ICE_FLOW_PTYPE_MAX);
			}
		}
	}

	return 0;
}

/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 *
 * This function determines the protocol ID, offset, and size of the given
 * field. It then allocates one or more extraction sequence entries for the
 * given field and fills the entries with protocol ID and offset information.
 */
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
		    u8 seg, enum ice_flow_field fld)
{
	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
	u8 fv_words = hw->blk[params->blk].es.fvw;
	struct ice_flow_fld_info *flds;
	u16 cnt, ese_bits, i;
	u16 off;

	flds = params->prof->segs[seg].fields;

	switch (fld) {
	case ICE_FLOW_FIELD_IDX_IPV4_SA:
	case ICE_FLOW_FIELD_IDX_IPV4_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
		prot_id = ICE_PROT_TCP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
		prot_id = ICE_PROT_SCTP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
		prot_id = ICE_PROT_GRE_OF;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	/* Each extraction sequence entry is a word in size, and extracts a
	 * word-aligned offset from a protocol header.
	 */
	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;

	flds[fld].xtrct.prot_id = prot_id;
	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
		ICE_FLOW_FV_EXTRACT_SZ;
	flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
	flds[fld].xtrct.idx = params->es_cnt;

	/* Adjust the next field-entry index after accommodating the number of
	 * entries this field consumes
	 */
	cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
			   ese_bits);

	/* Fill in the extraction sequence entries needed for this field */
	off = flds[fld].xtrct.off;
	for (i = 0; i < cnt; i++) {
		u8 idx;

		/* Make sure the number of extraction sequence entries required
		 * does not exceed the block's capability
		 */
		if (params->es_cnt >= fv_words)
			return ICE_ERR_MAX_LIMIT;

		/* some blocks require a reversed field vector layout */
		if (hw->blk[params->blk].es.reverse)
			idx = fv_words - params->es_cnt - 1;
		else
			idx = params->es_cnt;

		params->es[idx].prot_id = prot_id;
		params->es[idx].off = off;
		params->es_cnt++;

		off += ICE_FLOW_FV_EXTRACT_SZ;
	}

	return 0;
}
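
/* Worked example (assuming the 2-byte ICE_FLOW_FV_EXTRACT_SZ extraction word
 * used by this driver): for ICE_FLOW_FIELD_IDX_IPV6_SA, ice_flds_info gives
 * off = 64 bits and size = 128 bits, so ese_bits = 16,
 * xtrct.off = (64 / 16) * 2 = 8 bytes, xtrct.disp = 0, and
 * cnt = DIV_ROUND_UP(0 + 128, 16) = 8 extraction sequence entries.
 */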

/**
 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose raw fields are to be extracted
 */
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
		     u8 seg)
{
	u16 fv_words;
	u16 hdrs_sz;
	u8 i;

	if (!params->prof->segs[seg].raws_cnt)
		return 0;

	if (params->prof->segs[seg].raws_cnt >
	    ARRAY_SIZE(params->prof->segs[seg].raws))
		return ICE_ERR_MAX_LIMIT;

	/* Offsets within the segment headers are not supported */
	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
	if (!hdrs_sz)
		return ICE_ERR_PARAM;

	fv_words = hw->blk[params->blk].es.fvw;

	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
		struct ice_flow_seg_fld_raw *raw;
		u16 off, cnt, j;

		raw = &params->prof->segs[seg].raws[i];

		/* Storing extraction information */
		raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
		raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
			ICE_FLOW_FV_EXTRACT_SZ;
		raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
			BITS_PER_BYTE;
		raw->info.xtrct.idx = params->es_cnt;

		/* Determine the number of field vector entries this raw field
		 * consumes.
		 */
		cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
				   (raw->info.src.last * BITS_PER_BYTE),
				   (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
		off = raw->info.xtrct.off;
		for (j = 0; j < cnt; j++) {
			u16 idx;

			/* Make sure the number of extraction sequence entries
			 * required does not exceed the block's capability
			 */
			if (params->es_cnt >= hw->blk[params->blk].es.count ||
			    params->es_cnt >= ICE_MAX_FV_WORDS)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			params->es[idx].prot_id = raw->info.xtrct.prot_id;
			params->es[idx].off = off;
			params->es_cnt++;
			off += ICE_FLOW_FV_EXTRACT_SZ;
		}
	}

	return 0;
}

/**
 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 *
 * This function iterates through all matched fields in the given segments, and
 * creates an extraction sequence for the fields.
 */
static enum ice_status
ice_flow_create_xtrct_seq(struct ice_hw *hw,
			  struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof = params->prof;
	enum ice_status status = 0;
	u8 i;

	for (i = 0; i < prof->segs_cnt; i++) {
		u8 j;

		for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
				 ICE_FLOW_FIELD_IDX_MAX) {
			status = ice_flow_xtract_fld(hw, params, i,
						     (enum ice_flow_field)j);
			if (status)
				return status;
		}

		/* Process raw matching bytes */
		status = ice_flow_xtract_raws(hw, params, i);
		if (status)
			return status;
	}

	return status;
}

/**
 * ice_flow_proc_segs - process all packet segments associated with a profile
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 */
static enum ice_status
ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
{
	enum ice_status status;

	status = ice_flow_proc_seg_hdrs(params);
	if (status)
		return status;

	status = ice_flow_create_xtrct_seq(hw, params);
	if (status)
		return status;

	switch (params->blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		status = 0;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	return status;
}

#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004

/**
 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
 */
static struct ice_flow_prof *
ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
			 u8 segs_cnt, u16 vsi_handle, u32 conds)
{
	struct ice_flow_prof *p, *prof = NULL;

	mutex_lock(&hw->fl_profs_locks[blk]);
	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
		if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
		    segs_cnt && segs_cnt == p->segs_cnt) {
			u8 i;

			/* Check for profile-VSI association if specified */
			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
			    ice_is_vsi_valid(hw, vsi_handle) &&
			    !test_bit(vsi_handle, p->vsis))
				continue;

			/* Protocol headers must be checked. Matched fields are
			 * checked if specified.
			 */
			for (i = 0; i < segs_cnt; i++)
				if (segs[i].hdrs != p->segs[i].hdrs ||
				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
				     segs[i].match != p->segs[i].match))
					break;

			/* A match is found if all segments are matched */
			if (i == segs_cnt) {
				prof = p;
				break;
			}
		}
	mutex_unlock(&hw->fl_profs_locks[blk]);

	return prof;
}

/**
 * ice_flow_find_prof_id - Look up a profile with given profile ID
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: unique ID to identify this flow profile
 */
static struct ice_flow_prof *
ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
	struct ice_flow_prof *p;

	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
		if (p->id == prof_id)
			return p;

	return NULL;
}

/**
 * ice_dealloc_flow_entry - Deallocate flow entry memory
 * @hw: pointer to the HW struct
 * @entry: flow entry to be removed
 */
static void
ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
{
	if (!entry)
		return;

	if (entry->entry)
		devm_kfree(ice_hw_to_dev(hw), entry->entry);

	devm_kfree(ice_hw_to_dev(hw), entry);
}

/**
 * ice_flow_rem_entry_sync - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry: flow entry to be removed
 */
static enum ice_status
ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
			struct ice_flow_entry *entry)
{
	if (!entry)
		return ICE_ERR_BAD_PTR;

	list_del(&entry->l_entry);

	ice_dealloc_flow_entry(hw, entry);

	return 0;
}

/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @prof: stores the returned flow profile added
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_prof **prof)
{
	struct ice_flow_prof_params *params;
	enum ice_status status;
	u8 i;

	if (!prof)
		return ICE_ERR_BAD_PTR;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return ICE_ERR_NO_MEMORY;

	params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
				    GFP_KERNEL);
	if (!params->prof) {
		status = ICE_ERR_NO_MEMORY;
		goto free_params;
	}

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params->es[i].prot_id = ICE_PROT_INVALID;
		params->es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params->blk = blk;
	params->prof->id = prof_id;
	params->prof->dir = dir;
	params->prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs));

	status = ice_flow_proc_segs(hw, params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
			      params->es);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params->prof->entries);
	mutex_init(&params->prof->entries_lock);
	*prof = params->prof;

out:
	if (status)
		devm_kfree(ice_hw_to_dev(hw), params->prof);
free_params:
	kfree(params);

	return status;
}

/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the flow profile */
	if (!list_empty(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		mutex_lock(&prof->entries_lock);

		list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		mutex_unlock(&prof->entries_lock);
	}

	/* Remove all hardware profiles associated with this flow profile */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		list_del(&prof->l_entry);
		mutex_destroy(&prof->entries_lock);
		devm_kfree(ice_hw_to_dev(hw), prof);
	}

	return status;
}

/**
 * ice_flow_assoc_prof - associate a VSI with a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile
 * @vsi_handle: software VSI handle
 *
 * Assumption: the caller has acquired the lock to the profile list
 * and the software VSI handle has been validated
 */
static enum ice_status
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
		    struct ice_flow_prof *prof, u16 vsi_handle)
{
	enum ice_status status = 0;

	if (!test_bit(vsi_handle, prof->vsis)) {
		status = ice_add_prof_id_flow(hw, blk,
					      ice_get_hw_vsi_num(hw,
								 vsi_handle),
					      prof->id);
		if (!status)
			set_bit(vsi_handle, prof->vsis);
		else
			ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
				  status);
	}

	return status;
}

/**
 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile
 * @vsi_handle: software VSI handle
 *
 * Assumption: the caller has acquired the lock to the profile list
 * and the software VSI handle has been validated
 */
static enum ice_status
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof, u16 vsi_handle)
{
	enum ice_status status = 0;

	if (test_bit(vsi_handle, prof->vsis)) {
		status = ice_rem_prof_id_flow(hw, blk,
					      ice_get_hw_vsi_num(hw,
								 vsi_handle),
					      prof->id);
		if (!status)
			clear_bit(vsi_handle, prof->vsis);
		else
			ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
				  status);
	}

	return status;
}

/**
 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @prof: stores the returned flow profile added
 */
enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
		  struct ice_flow_prof **prof)
{
	enum ice_status status;

	if (segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_MAX_LIMIT;

	if (!segs_cnt)
		return ICE_ERR_PARAM;

	if (!segs)
		return ICE_ERR_BAD_PTR;

	status = ice_flow_val_hdrs(segs, segs_cnt);
	if (status)
		return status;

	mutex_lock(&hw->fl_profs_locks[blk]);

	status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
					prof);
	if (!status)
		list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);

	mutex_unlock(&hw->fl_profs_locks[blk]);

	return status;
}

/**
 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
 * @hw: pointer to the HW struct
 * @blk: the block for which the flow profile is to be removed
 * @prof_id: unique ID of the flow profile to be removed
 */
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
	struct ice_flow_prof *prof;
	enum ice_status status;

	mutex_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto out;
	}

	/* prof becomes invalid after the call */
	status = ice_flow_rem_prof_sync(hw, blk, prof);

out:
	mutex_unlock(&hw->fl_profs_locks[blk]);

	return status;
}

/**
 * ice_flow_add_entry - Add a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: ID of the profile to add a new flow entry to
 * @entry_id: unique ID to identify this flow entry
 * @vsi_handle: software VSI handle for the flow entry
 * @prio: priority of the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @entry_h: pointer to buffer that receives the new flow entry's handle
 */
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, u64 *entry_h)
{
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
	} else {
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile
		 */
		e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
		if (!e)
			status = ICE_ERR_NO_MEMORY;
		else
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	}

	mutex_unlock(&hw->fl_profs_locks[blk]);
	if (status)
		goto out;

	e->id = entry_id;
	e->vsi_handle = vsi_handle;
	e->prof = prof;
	e->priority = prio;

	switch (blk) {
	case ICE_BLK_FD:
	case ICE_BLK_RSS:
		break;
	default:
		status = ICE_ERR_NOT_IMPL;
		goto out;
	}

	mutex_lock(&prof->entries_lock);
	list_add(&e->l_entry, &prof->entries);
	mutex_unlock(&prof->entries_lock);

	*entry_h = ICE_FLOW_ENTRY_HNDL(e);

out:
	if (status && e) {
		if (e->entry)
			devm_kfree(ice_hw_to_dev(hw), e->entry);
		devm_kfree(ice_hw_to_dev(hw), e);
	}

	return status;
}
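
/* A minimal usage sketch of the profile/entry API above (illustrative only:
 * prof_id, entry_id, vsi_handle, the input buffer and the val/mask offsets
 * are caller-chosen, prio is an enum ice_flow_priority value, and error
 * handling is omitted):
 *
 *	struct ice_flow_seg_info seg = { 0 };
 *	struct ice_flow_prof *prof;
 *	u64 entry_h;
 *
 *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA, val_loc, mask_loc,
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 *	ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, &seg, 1, &prof);
 *	ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, entry_id, vsi_handle, prio,
 *			   input_buf, &entry_h);
 */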

/**
 * ice_flow_rem_entry - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry_h: handle to the flow entry to be removed
 */
enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
				   u64 entry_h)
{
	struct ice_flow_entry *entry;
	struct ice_flow_prof *prof;
	enum ice_status status = 0;

	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
		return ICE_ERR_PARAM;

	entry = ICE_FLOW_ENTRY_PTR(entry_h);

	/* Retain the pointer to the flow profile as the entry will be freed */
	prof = entry->prof;

	if (prof) {
		mutex_lock(&prof->entries_lock);
		status = ice_flow_rem_entry_sync(hw, blk, entry);
		mutex_unlock(&prof->entries_lock);
	}

	return status;
}

/**
 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
 * @field_type: type of the field
 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 *           entry's input buffer
 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
 *            input buffer
 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
 *            entry's input buffer
 *
 * This helper function stores information of a field being matched, including
 * the type of the field and the locations of the value to match, the mask, and
 * the upper-bound value in the start of the input buffer for a flow entry.
 * This function should only be used for fixed-size data structures.
 *
 * This function also opportunistically determines the protocol headers to be
 * present based on the fields being set. Some fields cannot be used alone to
 * determine the protocol headers present. Sometimes, fields for particular
 * protocol headers are not matched. In those cases, the protocol headers
 * must be explicitly set.
 */
static void
ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		     enum ice_flow_fld_match_type field_type, u16 val_loc,
		     u16 mask_loc, u16 last_loc)
{
	u64 bit = BIT_ULL(fld);

	seg->match |= bit;
	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
		seg->range |= bit;

	seg->fields[fld].type = field_type;
	seg->fields[fld].src.val = val_loc;
	seg->fields[fld].src.mask = mask_loc;
	seg->fields[fld].src.last = last_loc;

	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
}

/**
 * ice_flow_set_fld - specifies locations of field from entry's input buffer
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 *           entry's input buffer
 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
 *            input buffer
 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
 *            entry's input buffer
 * @range: indicate if field being matched is to be in a range
 *
 * This function specifies the locations, in the form of byte offsets from the
 * start of the input buffer for a flow entry, from where the value to match,
 * the mask value, and upper value can be extracted. These locations are then
 * stored in the flow profile. When adding a flow entry associated with the
 * flow profile, these locations will be used to quickly extract the values and
 * create the content of a match entry. This function should only be used for
 * fixed-size data structures.
 */
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
{
	enum ice_flow_fld_match_type t = range ?
		ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;

	ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
}

/**
 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
 * @seg: packet segment the field being set belongs to
 * @off: offset of the raw field from the beginning of the segment in bytes
 * @len: length of the raw pattern to be matched
 * @val_loc: location of the value to match from entry's input buffer
 * @mask_loc: location of mask value from entry's input buffer
 *
 * This function specifies the offset of the raw field to be matched from the
 * beginning of the specified packet segment, and the locations, in the form of
 * byte offsets from the start of the input buffer for a flow entry, from which
 * the value to match and the mask value are to be extracted. These locations
 * are then stored in the flow profile. When adding flow entries to the
 * associated flow profile, these locations can be used to quickly extract the
 * values to create the content of a match entry. This function should only be
 * used for fixed-size data structures.
 */
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
		     u16 val_loc, u16 mask_loc)
{
	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
		seg->raws[seg->raws_cnt].off = off;
		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
		seg->raws[seg->raws_cnt].info.src.val = val_loc;
		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
		/* The "last" field is used to store the length of the field */
		seg->raws[seg->raws_cnt].info.src.last = len;
	}

	/* Overflows of "raws" will be handled as an error condition later in
	 * the flow when this information is processed.
	 */
	seg->raws_cnt++;
}

#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)

#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)

/**
 * ice_flow_set_rss_seg_info - setup packet segments for RSS
 * @segs: pointer to the flow field segment(s)
 * @hash_fields: fields to be hashed on for the segment(s)
 * @flow_hdr: protocol header fields within a packet segment
 *
 * Helper function to extract fields from hash bitmap and use flow
 * header value to set flow field segment for further use in flow
 * profile entry or removal.
 */
static enum ice_status
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
			  u32 flow_hdr)
{
	u64 val;
	u8 i;

	for_each_set_bit(i, (unsigned long *)&hash_fields,
			 ICE_FLOW_FIELD_IDX_MAX)
		ice_flow_set_fld(segs, (enum ice_flow_field)i,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	ICE_FLOW_SET_HDRS(segs, flow_hdr);

	if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
		return ICE_ERR_PARAM;

	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
	if (val && !is_power_of_2(val))
		return ICE_ERR_CFG;

	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
	if (val && !is_power_of_2(val))
		return ICE_ERR_CFG;

	return 0;
}

/**
 * ice_rem_vsi_rss_list - remove VSI from RSS list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * Remove the VSI from all RSS configurations in the list.
 */
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_rss_cfg *r, *tmp;

	if (list_empty(&hw->rss_list_head))
		return;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
		if (test_and_clear_bit(vsi_handle, r->vsis))
			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
				list_del(&r->l_entry);
				devm_kfree(ice_hw_to_dev(hw), r);
			}
	mutex_unlock(&hw->rss_locks);
}

/**
 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * This function will iterate through all flow profiles and disassociate
 * the VSI from that profile. If the flow profile has no VSIs it will
 * be removed.
 */
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *p, *t;
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	if (list_empty(&hw->fl_profs[blk]))
		return 0;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
		if (test_bit(vsi_handle, p->vsis)) {
			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
			if (status)
				break;

			if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
				status = ice_flow_rem_prof(hw, blk, p->id);
				if (status)
					break;
			}
		}
	mutex_unlock(&hw->rss_locks);

	return status;
}

/**
 * ice_rem_rss_list - remove RSS configuration from list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @prof: pointer to flow profile
 *
 * Assumption: lock has already been acquired for RSS list
 */
static void
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
	struct ice_rss_cfg *r, *tmp;

	/* Search for RSS hash fields associated to the VSI that match the
	 * hash configurations associated to the flow profile. If found
	 * remove from the RSS entry list of the VSI context and delete entry.
	 */
	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
			clear_bit(vsi_handle, r->vsis);
			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
				list_del(&r->l_entry);
				devm_kfree(ice_hw_to_dev(hw), r);
			}
			return;
		}
}

/**
 * ice_add_rss_list - add RSS configuration to list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @prof: pointer to flow profile
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
	struct ice_rss_cfg *r, *rss_cfg;

	list_for_each_entry(r, &hw->rss_list_head, l_entry)
		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
			set_bit(vsi_handle, r->vsis);
			return 0;
		}

	rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
			       GFP_KERNEL);
	if (!rss_cfg)
		return ICE_ERR_NO_MEMORY;

	rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
	rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
	set_bit(vsi_handle, rss_cfg->vsis);

	list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);

	return 0;
}

#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
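
/* For example, ice_add_rss_cfg_sync() below calls this macro with segs_cnt of
 * ICE_RSS_OUTER_HEADERS (1) or ICE_RSS_INNER_HEADERS (2); only the latter
 * sets ICE_FLOW_PROF_ENCAP_M, so outer and inner RSS configurations with the
 * same hash fields and headers still receive distinct profile IDs.
 */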

/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 * @segs_cnt: packet segment count
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_PARAM;

	segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto exit;

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof)
		goto exit;

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			status = ice_add_rss_list(hw, vsi_handle, prof);
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(hashed_flds,
						       segs[segs_cnt - 1].hdrs,
						       segs_cnt),
				   segs, segs_cnt, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

exit:
	kfree(segs);
	return status;
}

/**
 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 *
 * This function will generate a flow profile based on the fields to hash on
 * and the flow type, and will use the VSI number to add a flow entry to the
 * profile.
 */
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		u32 addl_hdrs)
{
	enum ice_status status;

	if (hashed_flds == ICE_HASH_INVALID ||
	    !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&hw->rss_locks);
	status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
				      ICE_RSS_OUTER_HEADERS);
	if (!status)
		status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
					      addl_hdrs, ICE_RSS_INNER_HEADERS);
	mutex_unlock(&hw->rss_locks);

	return status;
}

/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
 * As the ice_flow_avf_hdr_field values represent individual bit shifts in a
 * hash, convert its values to their appropriate flow L3, L4 values.
 */
#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))

#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))

/**
 * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
 *
 * This function will take the hash bitmap provided by the AVF driver via a
 * message, convert it to ICE-compatible values, and configure RSS flow
 * profiles.
 */
enum ice_status
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
{
	enum ice_status status = 0;
	u64 hash_flds;

	if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
	    !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Make sure no unsupported bits are specified */
	if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
			 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
		return ICE_ERR_CFG;

	hash_flds = avf_hash;

	/* Always create an L3 RSS configuration for any L4 RSS configuration */
	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
		hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;

	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
		hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;

	/* Create the corresponding RSS configuration for each valid hash bit */
	while (hash_flds) {
		u64 rss_hash = ICE_HASH_INVALID;

		if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
			if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4;
				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_TCP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_UDP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
			} else if (hash_flds &
				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_SCTP_PORT;
				hash_flds &=
					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
			}
		} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
			if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6;
				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_TCP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_UDP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
			} else if (hash_flds &
				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_SCTP_PORT;
				hash_flds &=
					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
			}
		}

		if (rss_hash == ICE_HASH_INVALID)
			return ICE_ERR_OUT_OF_RANGE;

		status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
					 ICE_FLOW_SEG_HDR_NONE);
		if (status)
			break;
	}

	return status;
}
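
/* To illustrate the conversion loop above: if the AVF driver requests only
 * BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP), the IPv4 L3 bits are OR'ed in first,
 * so one pass programs a plain ICE_FLOW_HASH_IPV4 configuration and the next
 * programs ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT, after which hash_flds
 * is empty and the loop terminates.
 */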

/**
 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 */
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status = 0;
	struct ice_rss_cfg *r;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry(r, &hw->rss_list_head, l_entry) {
		if (test_bit(vsi_handle, r->vsis)) {
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
						      r->hashed_flds,
						      r->packet_hdr,
						      ICE_RSS_OUTER_HEADERS);
			if (status)
				break;
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
						      r->hashed_flds,
						      r->packet_hdr,
						      ICE_RSS_INNER_HEADERS);
			if (status)
				break;
		}
	}
	mutex_unlock(&hw->rss_locks);

	return status;
}

/**
 * ice_get_rss_cfg - returns hashed fields for the given header types
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hdrs: protocol header type
 *
 * This function will return the match fields of the first instance of a flow
 * profile that has the given header types and contains the input VSI.
 */
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
{
	u64 rss_hash = ICE_HASH_INVALID;
	struct ice_rss_cfg *r;

	/* verify if the protocol header is non zero and VSI is valid */
	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_HASH_INVALID;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry(r, &hw->rss_list_head, l_entry)
		if (test_bit(vsi_handle, r->vsis) &&
		    r->packet_hdr == hdrs) {
			rss_hash = r->hashed_flds;
			break;
		}
	mutex_unlock(&hw->rss_locks);

	return rss_hash;
}