// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_lib.h"
#include "ice_switch.h"

/* Byte offsets of fields within the dummy Ethernet header below */
#define ICE_ETH_DA_OFFSET		0
#define ICE_ETH_ETHTYPE_OFFSET		12
#define ICE_ETH_VLAN_TCI_OFFSET		14
#define ICE_MAX_VLAN_ID			0xFFF
#define ICE_IPV6_ETHER_ID		0x86DD

/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *	In case of VLAN filter first two bytes defines ether type (0x8100)
 *	and remaining two bytes are placeholder for programming a given VLAN ID
 *	In case of Ether type filter it is treated as header without VLAN tag
 *	and byte 12 and 13 is used to program a given Ether type instead
 */
#define DUMMY_ETH_HDR_LEN		16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

/* Describes one protocol header inside a dummy packet: its type and its
 * byte offset from the start of the packet.
 */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};

/* offset info for MAC + IPv4 + NVGRE + MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_TCP_IL,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv4 + TCP */
static const u8 dummy_gre_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_UDP_ILOS,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv4 + UDP */
static const u8 dummy_gre_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,
};

/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * MAC + IPv4 + TCP dummy packet
 */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_TCP_IL,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};
/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv4 + TCP */
static const u8 dummy_udp_tun_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34 */
	0x00, 0x46, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};

/* offset info for MAC + IPv4 + UDP tunnel + MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_UDP_ILOS,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv4 + UDP */
static const u8 dummy_udp_tun_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x4e,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34 */
	0x00, 0x3a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,
};
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv6 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets
dummy_gre_ipv6_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_TCP_IL,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv6 + TCP */
static const u8 dummy_gre_ipv6_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x66,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x06, 0x40,	/* next header 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 96 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};

/* offset info for MAC + IPv4 + NVGRE + MAC + IPv6 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets
dummy_gre_ipv6_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_UDP_ILOS,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};
/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv6 + UDP */
static const u8 dummy_gre_ipv6_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x11, 0x40,	/* next header 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 96 */
	0x00, 0x08, 0x00, 0x00,
};

/* offset info for MAC + IPv4 + UDP tunnel + MAC + IPv6 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets
dummy_udp_tun_ipv6_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_TCP_IL,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv6 + TCP */
static const u8 dummy_udp_tun_ipv6_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x6e,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34 */
	0x00, 0x5a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x06, 0x40,	/* next header 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 104 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
/* offset info for MAC + IPv4 + UDP tunnel + MAC + IPv6 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets
dummy_udp_tun_ipv6_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_UDP_ILOS,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv6 + UDP */
static const u8 dummy_udp_tun_ipv6_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x62,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34 */
	0x00, 0x4e, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x11, 0x00,	/* next header 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 104 */
	0x00, 0x08, 0x00, 0x00,
};
/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP */
static const u8 dummy_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_UDP_ILOS,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (801.1Q), IPv4:UDP dummy packet */
static const u8 dummy_vlan_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x08, 0x00,		/* ICE_ETYPE_OL 16 */

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 38 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
/* offset info for MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_TCP_IL,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + TCP */
static const u8 dummy_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_TCP_IL,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (801.1Q), IPv4:TCP dummy packet */
static const u8 dummy_vlan_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x08, 0x00,		/* ICE_ETYPE_OL 16 */

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 38 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + IPv6 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_TCP_IL,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};
/* Dummy packet for MAC + IPv6 + TCP */
static const u8 dummy_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00,	/* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* C-tag (802.1Q): IPv6 + TCP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_tcp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_TCP_IL,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + TCP dummy packet */
static const u8 dummy_vlan_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 18 */
	0x00, 0x14, 0x06, 0x00,	/* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 58 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
/* IPv6 + UDP */
static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* IPv6 + UDP dummy packet */
static const u8 dummy_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00,	/* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,	/* UDP length 0x10 covers the 8 pad bytes */

	0x00, 0x00, 0x00, 0x00,	/* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* C-tag (802.1Q): IPv6 + UDP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_udp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_UDP_ILOS,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + UDP dummy packet */
static const u8 dummy_vlan_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 18 */
	0x00, 0x08, 0x11, 0x00,	/* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 58 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
static const
struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_TCP_IL,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x58,	/* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68,	/* UDP 34, dst port 2152 = GTP-U */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34,	/* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00,	/* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28,	/* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* TCP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
static const
struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_UDP_ILOS,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x4c,	/* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68,	/* UDP 34, dst port 2152 = GTP-U */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28,	/* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00,	/* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c,	/* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* UDP 82 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
static const
struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x6c,	/* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68,	/* UDP 34, dst port 2152 = GTP-U */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48,	/* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00,	/* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00,	/* IPv6 62 */
	0x00, 0x14, 0x06, 0x00,	/* next header 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
static const
struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x60,	/* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68,	/* UDP 34, dst port 2152 = GTP-U */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c,	/* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00,	/* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00,	/* IPv6 62 */
	0x00, 0x08, 0x11, 0x00,	/* next header 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};
static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00,	/* IPv6 14 */
	0x00, 0x44, 0x11, 0x00,	/* next header 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68,	/* UDP 54, dst port 2152 = GTP-U */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34,	/* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00,	/* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28,	/* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00,	/* IPv6 14 */
	0x00, 0x38, 0x11, 0x00,	/* next header 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68,	/* UDP 54, dst port 2152 = GTP-U */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28,	/* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00,	/* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c,	/* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_TCP_IL,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00,	/* IPv6 14 */
	0x00, 0x58, 0x11, 0x00,	/* next header 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68,	/* UDP 54, dst port 2152 = GTP-U */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48,	/* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00,	/* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00,	/* IPv6 82 */
	0x00, 0x14, 0x06, 0x00,	/* next header 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* TCP 122 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_UDP_ILOS,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00,	/* IPv6 14 */
	0x00, 0x4c, 0x11, 0x00,	/* next header 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68,	/* UDP 54, dst port 2152 = GTP-U */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c,	/* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00,	/* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00,	/* IPv6 82 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* UDP 122 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* Outer IPv4 + Outer UDP + GTP (no inner payload headers matched) */
static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x44,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68,	/* ICE_UDP_OF 34 */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28,	/* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00,	/* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 62 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,
};
/* offset info for outer IPv4 + UDP + GTP with no payload after the GTP hdr */
static const
struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP_NO_PAY,	42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* offset info for outer IPv6 + UDP + GTP with no payload after the GTP hdr */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP_NO_PAY,	62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for outer IPv6 + UDP + GTP, no GTP payload */
static const u8 dummy_ipv6_gtp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x6c, 0x11, 0x00,	/* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68,	/* ICE_UDP_OF 54 */
	0x00, 0x00, 0x00, 0x00,

	0x30, 0x00, 0x00, 0x28,	/* ICE_GTP 62 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,
};

/* Sizes (in bytes) of the variable-length switch-rule AQ payloads;
 * the lookup rx/tx rule carries DUMMY_ETH_HDR_LEN header bytes.
 */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
	 (DUMMY_ETH_HDR_LEN * \
	  sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
/* Size of a large-action rule element holding n actions */
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
/* Size of a VSI-list rule element holding n VSI entries */
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))

/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
		      ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
		      ICE_MAX_NUM_RECIPES);

/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 *
 * Return: 0 on success, -ENOMEM if the table cannot be allocated.
 */
int ice_init_def_sw_recp(struct ice_hw *hw)
{
	struct ice_sw_recipe *recps;
	u8 i;

	/* device-managed allocation: freed automatically on driver detach */
	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
			     sizeof(*recps), GFP_KERNEL);
	if (!recps)
		return -ENOMEM;

	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		INIT_LIST_HEAD(&recps[i].rg_list);
		mutex_init(&recps[i].filt_rule_lock);
	}

	hw->switch_info->recp_list = recps;

	return 0;
}

/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in buf.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *request_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
 */
static int
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_get_sw_cfg *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = cpu_to_le16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status) {
		/* f/w echoes back the next descriptor and the element count */
		*req_desc = le16_to_cpu(cmd->element);
		*num_elems = le16_to_cpu(cmd->num_elems);
	}

	return status;
}

/**
 * ice_aq_add_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
static int
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	/* only pass an explicit VSI number when not allocating from the pool */
	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vf_id = vsi_ctx->vf_num;

	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		/* record the HW VSI number and pool accounting from the resp */
		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
	}

	return status;
}

/**
 * ice_aq_free_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
static int
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	if (keep_vsi_alloc)
		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_aq_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
static int
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_is_vsi_valid - check whether the VSI is valid or not
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * check whether the VSI is valid or not
 *
 * Return: true when the handle is in range and a context entry exists.
 */
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
}

/**
 * ice_get_hw_vsi_num - return the HW VSI number
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the HW VSI number
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
{
	return hw->vsi_ctx[vsi_handle]->vsi_num;
}

/**
 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the VSI context entry for a given VSI handle
 */
1464 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) 1465 { 1466 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle]; 1467 } 1468 1469 /** 1470 * ice_save_vsi_ctx - save the VSI context for a given VSI handle 1471 * @hw: pointer to the HW struct 1472 * @vsi_handle: VSI handle 1473 * @vsi: VSI context pointer 1474 * 1475 * save the VSI context entry for a given VSI handle 1476 */ 1477 static void 1478 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi) 1479 { 1480 hw->vsi_ctx[vsi_handle] = vsi; 1481 } 1482 1483 /** 1484 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs 1485 * @hw: pointer to the HW struct 1486 * @vsi_handle: VSI handle 1487 */ 1488 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle) 1489 { 1490 struct ice_vsi_ctx *vsi; 1491 u8 i; 1492 1493 vsi = ice_get_vsi_ctx(hw, vsi_handle); 1494 if (!vsi) 1495 return; 1496 ice_for_each_traffic_class(i) { 1497 if (vsi->lan_q_ctx[i]) { 1498 devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]); 1499 vsi->lan_q_ctx[i] = NULL; 1500 } 1501 if (vsi->rdma_q_ctx[i]) { 1502 devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]); 1503 vsi->rdma_q_ctx[i] = NULL; 1504 } 1505 } 1506 } 1507 1508 /** 1509 * ice_clear_vsi_ctx - clear the VSI context entry 1510 * @hw: pointer to the HW struct 1511 * @vsi_handle: VSI handle 1512 * 1513 * clear the VSI context entry 1514 */ 1515 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) 1516 { 1517 struct ice_vsi_ctx *vsi; 1518 1519 vsi = ice_get_vsi_ctx(hw, vsi_handle); 1520 if (vsi) { 1521 ice_clear_vsi_q_ctx(hw, vsi_handle); 1522 devm_kfree(ice_hw_to_dev(hw), vsi); 1523 hw->vsi_ctx[vsi_handle] = NULL; 1524 } 1525 } 1526 1527 /** 1528 * ice_clear_all_vsi_ctx - clear all the VSI context entries 1529 * @hw: pointer to the HW struct 1530 */ 1531 void ice_clear_all_vsi_ctx(struct ice_hw *hw) 1532 { 1533 u16 i; 1534 1535 for (i = 0; i < ICE_MAX_VSI; i++) 1536 ice_clear_vsi_ctx(hw, i); 1537 } 1538 
/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs then update
 * with the new HW VSI number in the corresponding VSI handle list entry.
 */
int
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
{
	struct ice_vsi_ctx *tmp_vsi_ctx;
	int status;

	if (vsi_handle >= ICE_MAX_VSI)
		return -EINVAL;
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	if (status)
		return status;
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!tmp_vsi_ctx) {
		/* Create a new VSI context */
		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
		if (!tmp_vsi_ctx) {
			/* roll back the HW add so we don't leak the VSI */
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
			return -ENOMEM;
		}
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
	} else {
		/* update with new HW VSI num */
		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
	}

	return 0;
}

/**
 * ice_free_vsi- free VSI context from hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware as well as from VSI handle list
 */
int
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
	if (!status)
		ice_clear_vsi_ctx(hw, vsi_handle);
	return status;
}

/**
 * ice_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware
 */
int
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	return ice_aq_update_vsi(hw, vsi_ctx, cd);
}

/**
 * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
 * @hw: pointer to HW struct
 * @vsi_handle: VSI SW index
 * @enable: boolean for enable/disable
 */
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
{
	struct ice_vsi_ctx *ctx;

	ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!ctx)
		return -EIO;

	if (enable)
		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	else
		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;

	return ice_update_vsi(hw, vsi_handle, ctx, NULL);
}

/**
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the HW struct
 * @vsi_list_id: VSI list ID returned or used for lookup
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 *
 * allocates or free a VSI list resource
 */
static int
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
			   enum ice_sw_lkup_type lkup_type,
			   enum ice_adminq_opc opc)
{
	struct ice_aqc_alloc_free_res_elem
*sw_buf;
	struct ice_aqc_res_elem *vsi_ele;
	u16 buf_len;
	int status;

	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
	if (!sw_buf)
		return -ENOMEM;
	sw_buf->num_elems = cpu_to_le16(1);

	/* choose the resource type from the lookup type: replication list for
	 * forwarding lookups, prune list for VLAN, otherwise invalid
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
		sw_buf->res_type =
			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
	} else {
		status = -EINVAL;
		goto ice_aq_alloc_free_vsi_list_exit;
	}

	if (opc == ice_aqc_opc_free_res)
		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
	if (status)
		goto ice_aq_alloc_free_vsi_list_exit;

	if (opc == ice_aqc_opc_alloc_res) {
		/* return the newly allocated VSI list ID to the caller */
		vsi_ele = &sw_buf->elem[0];
		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
	}

ice_aq_alloc_free_vsi_list_exit:
	devm_kfree(ice_hw_to_dev(hw), sw_buf);
	return status;
}

/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the HW struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 */
int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	int status;

	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		cpu_to_le16(num_rules);
	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
	/* map firmware "entry not found" to -ENOENT for update/remove */
	if (opc != ice_aqc_opc_add_sw_rules &&
	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
		status = -ENOENT;

	return status;
}

/**
 * ice_aq_add_recipe - add switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: number of switch recipes in the list
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x0290)
 */
static int
ice_aq_add_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 num_recipes, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	u16 buf_size;

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);

	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	buf_size = num_recipes * sizeof(*s_recipe_list);

	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
}

/**
 * ice_aq_get_recipe - get switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: pointer to the number of recipes (input and output)
 * @recipe_root: root recipe number of recipe(s) to retrieve
 * @cd: pointer to command details structure or NULL
 *
 * Get(0x0292)
 *
 * On input, *num_recipes should equal the number of entries in s_recipe_list.
 * On output, *num_recipes will equal the number of entries returned in
 * s_recipe_list.
 *
 * The caller must supply enough space in s_recipe_list to hold all possible
 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
 */
static int
ice_aq_get_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	u16 buf_size;
	int status;

	if (*num_recipes != ICE_MAX_NUM_RECIPES)
		return -EINVAL;

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);

	cmd->return_index = cpu_to_le16(recipe_root);
	cmd->num_sub_recipes = 0;

	buf_size = *num_recipes * sizeof(*s_recipe_list);

	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
	/* f/w reports how many sub-recipes were actually returned */
	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);

	return status;
}

/**
 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
 * @hw: pointer to the HW struct
 * @params: parameters used to update the default recipe
 *
 * This function only supports updating default recipes and it only supports
 * updating a single recipe based on the lkup_idx at a time.
 *
 * This is done as a read-modify-write operation. First, get the current recipe
 * contents based on the recipe's ID. Then modify the field vector index and
 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
 * the pre-existing recipe with the modifications.
 */
int
ice_update_recipe_lkup_idx(struct ice_hw *hw,
			   struct ice_update_recipe_lkup_idx_params *params)
{
	struct ice_aqc_recipe_data_elem *rcp_list;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	int status;

	rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
	if (!rcp_list)
		return -ENOMEM;

	/* read current recipe list from firmware */
	rcp_list->recipe_indx = params->rid;
	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
	if (status) {
		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
			  params->rid, status);
		goto error_out;
	}

	/* only modify existing recipe's lkup_idx and mask if valid, while
	 * leaving all other fields the same, then update the recipe firmware
	 */
	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
	if (params->mask_valid)
		rcp_list->content.mask[params->lkup_idx] =
			cpu_to_le16(params->mask);

	if (params->ignore_valid)
		rcp_list->content.lkup_indx[params->lkup_idx] |=
			ICE_AQ_RECIPE_LKUP_IGNORE;

	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
			  params->rid, params->lkup_idx, params->fv_idx,
			  params->mask, params->mask_valid ? "true" : "false",
			  status);

error_out:
	kfree(rcp_list);
	return status;
}

/**
 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 * Recipe to profile association (0x0291)
 */
static int
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
{
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);
	/* Set the recipe ID bit in the bitmask to let the device know which
	 * profile we are associating the recipe to
	 */
	memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 * Associate profile ID with given recipe (0x0293)
 */
static int
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
{
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status)
		memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));

	return status;
}

/**
 * ice_alloc_recipe - add recipe resource
 * @hw: pointer to the hardware structure
 * @rid: recipe ID returned as response to AQ call
 */
static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	u16 buf_len;
	int status;

	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = kzalloc(buf_len, GFP_KERNEL);
	if (!sw_buf)
		return -ENOMEM;

	sw_buf->num_elems = cpu_to_le16(1);
	/* recipes are allocated shared so they can be chained */
	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
					ICE_AQC_RES_TYPE_S) |
				       ICE_AQC_RES_TYPE_FLAG_SHARED);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (!status)
		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
	kfree(sw_buf);

	return status;
}

/**
 * ice_get_recp_to_prof_map - updates recipe to profile mapping
 * @hw: pointer to hardware structure
 *
 * This function is used to populate recipe_to_profile matrix where index to
 * this array is the recipe ID and the element is the mapping of which profiles
 * is this recipe mapped to.
 */
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
{
	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
	u16 i;

	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
		u16 j;

		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
			continue;
		bitmap_copy(profile_to_recipe[i], r_bitmap,
			    ICE_MAX_NUM_RECIPES);
		/* maintain the reverse mapping as well */
		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
			set_bit(i, recipe_to_profile[j]);
	}
}

/**
 * ice_collect_result_idx - copy result index values
 * @buf: buffer that contains the result index
 * @recp: the recipe struct to copy data into
 */
static void
ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
		       struct ice_sw_recipe *recp)
{
	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
			recp->res_idxs);
}

/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 */
static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
{
	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	u8 fv_word_idx = 0;
	u16 sub_recps;
	int status;

	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */
	if (status)
		goto err_unroll;

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;
	}

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;
		bool is_root;
		u16 off = 0;

		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
					GFP_KERNEL);
		if (!rg_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
				result_bm);

		/* get the first profile that is associated with rid */
		prof = find_first_bit(recipe_to_profile[idx],
				      ICE_MAX_NUM_PROFILES);
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			/* lkup_indx[0] is skipped; it is not a protocol word */
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				le16_to_cpu(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)
				continue;

			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];
			fv_word_idx++;
		}
		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		list_add(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
		} else {
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
		}

		if (!is_root)
			continue;

		/* Only do the following for root recipes entries */
		memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
		       sizeof(recps[idx].r_bitmap));
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
	}

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
					   GFP_KERNEL);
	if (!recps[rid].root_buf) {
		status = -ENOMEM;
		goto err_unroll;
	}

	/* Copy result indexes */
	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;

err_unroll:
	kfree(tmp);
	return status;
}

/* ice_init_port_info - Initialize port_info with switch configuration data
 * @pi: pointer to port_info
 * @vsi_port_num: VSI number or port number
 * @type: Type of switch element (port or VSI)
 * @swid: switch ID of the switch the element is attached to
 * @pf_vf_num: PF or VF number
 * @is_vf: true if the element is a VF, false otherwise
 */
static void
ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
		   u16 swid, u16 pf_vf_num, bool is_vf)
{
	switch (type) {
	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
		pi->sw_id = swid;
		pi->pf_vf_num = pf_vf_num;
		pi->is_vf = is_vf;
		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
		break;
	default:
		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
		break;
	}
}

/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 */
int ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
	u16 req_desc = 0;
	u16 num_elems;
	int status;
	u16 i;

	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
			    GFP_KERNEL);

	if (!rbuf)
		return -ENOMEM;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
The need 2199 * for additional calls is indicated by ice_aq_get_sw_cfg 2200 * writing a non-zero value in req_desc 2201 */ 2202 do { 2203 struct ice_aqc_get_sw_cfg_resp_elem *ele; 2204 2205 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN, 2206 &req_desc, &num_elems, NULL); 2207 2208 if (status) 2209 break; 2210 2211 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) { 2212 u16 pf_vf_num, swid, vsi_port_num; 2213 bool is_vf = false; 2214 u8 res_type; 2215 2216 vsi_port_num = le16_to_cpu(ele->vsi_port_num) & 2217 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M; 2218 2219 pf_vf_num = le16_to_cpu(ele->pf_vf_num) & 2220 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M; 2221 2222 swid = le16_to_cpu(ele->swid); 2223 2224 if (le16_to_cpu(ele->pf_vf_num) & 2225 ICE_AQC_GET_SW_CONF_RESP_IS_VF) 2226 is_vf = true; 2227 2228 res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >> 2229 ICE_AQC_GET_SW_CONF_RESP_TYPE_S); 2230 2231 if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) { 2232 /* FW VSI is not needed. Just continue. */ 2233 continue; 2234 } 2235 2236 ice_init_port_info(hw->port_info, vsi_port_num, 2237 res_type, swid, pf_vf_num, is_vf); 2238 } 2239 } while (req_desc && !status); 2240 2241 devm_kfree(ice_hw_to_dev(hw), rbuf); 2242 return status; 2243 } 2244 2245 /** 2246 * ice_fill_sw_info - Helper function to populate lb_en and lan_en 2247 * @hw: pointer to the hardware structure 2248 * @fi: filter info structure to fill/update 2249 * 2250 * This helper function populates the lb_en and lan_en elements of the provided 2251 * ice_fltr_info struct using the switch's type and characteristics of the 2252 * switch rule being configured. 
 */
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
{
	fi->lb_en = false;
	fi->lan_en = false;
	/* Only Tx-direction forwarding actions get LB/LAN enables */
	if ((fi->flag & ICE_FLTR_TX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     fi->fltr_act == ICE_FWD_TO_Q ||
	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
		/* Setting LB for prune actions will result in replicated
		 * packets to the internal switch that will be dropped.
		 */
		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
			fi->lb_en = true;

		/* Set lan_en to TRUE if
		 * 1. The switch is a VEB AND
		 * 2
		 * 2.1 The lookup is a directional lookup like ethertype,
		 *     promiscuous, ethertype-MAC, promiscuous-VLAN
		 *     and default-port OR
		 * 2.2 The lookup is VLAN, OR
		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
		 *
		 * OR
		 *
		 * The switch is a VEPA.
		 *
		 * In all other cases, the LAN enable has to be set to false.
		 */
		if (hw->evb_veb) {
			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
				fi->lan_en = true;
		} else {
			fi->lan_en = true;
		}
	}
}

/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	u16 vlan_id = ICE_MAX_VLAN_ID + 1; /* sentinel: no VLAN to program */
	u16 vlan_tpid = ETH_P_8021Q;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	if (opc == ice_aqc_opc_remove_sw_rules) {
		/* removal only needs the rule index; no header or action */
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act
				|= ICE_SINGLE_ACT_VSI_FORWARDING |
				   ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
			       ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue-group region is encoded as log2 of the group size */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->l_data.vlan.tpid_valid)
			vlan_tpid = f_info->l_data.vlan.tpid;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		fallthrough;
	case ICE_SW_LKUP_ETHERTYPE:
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr =
			f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		fallthrough;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* vlan_id was left at its out-of-range sentinel if no lookup type
	 * above supplied one
	 */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(vlan_tpid);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}

/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3.
	 *    GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	u16 lg_act_size;
	u16 rules_size;
	int status;
	u32 act;
	u16 id;

	/* markers are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return -EINVAL;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return -ENOMEM;

	/* the lookup rule lives immediately after the large action in the
	 * shared buffer
	 */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}

/**
 * ice_create_vsi_list_map
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 *
 * Helper function to create a new entry of VSI list ID to VSI mapping
 * using the given VSI list ID
 */
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			u16 vsi_list_id)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_map;
	int i;

	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
	if (!v_map)
		return NULL;

	v_map->vsi_list_id = vsi_list_id;
	v_map->ref_cnt = 1;
	for (i = 0; i < num_vsi; i++)
		set_bit(vsi_handle_arr[i], v_map->vsi_map);

	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
	return v_map;
}

/**
 * ice_update_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 *
 * Call AQ command to add a new switch rule or
 * update existing switch rule
 * using the given VSI list ID
 */
static int
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	u16 s_rule_size;
	u16 rule_type;
	int status;
	int i;

	if (!num_vsi)
		return -EINVAL;

	/* VLAN filters use prune lists; all other supported lookup types use
	 * plain VSI lists
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)
		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
	else
		return -EINVAL;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;
	for (i = 0; i < num_vsi; i++) {
		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
			status = -EINVAL;
			goto exit;
		}
		/* AQ call requires hw_vsi_id(s) */
		s_rule->pdata.vsi_list.vsi[i] =
			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
	}

	s_rule->type = cpu_to_le16(rule_type);
	s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);

exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
 * @hw: pointer to the HW struct
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: stores the ID of the VSI list to be created
 * @lkup_type: switch rule filter's lookup type
 */
static int
ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
{
	int status;

	/* allocate the VSI list resource first; the ID comes back in
	 * *vsi_list_id
	 */
	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
					    ice_aqc_opc_alloc_res);
	if (status)
		return status;

	/* Update the newly created VSI list to include the specified VSIs */
	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
					*vsi_list_id, false,
					ice_aqc_opc_add_sw_rules, lkup_type);
}

/**
 * ice_create_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_entry: entry containing packet forwarding information
 *
 * Create switch rule with given filter information and add an entry
 * to the corresponding filter management list to track this switch rule
 * and VSI mapping
 */
static int
ice_create_pkt_fwd_rule(struct ice_hw *hw,
			struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_sw_lkup_type l_type;
	struct ice_sw_recipe *recp;
	int status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;
	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
				GFP_KERNEL);
	if (!fm_entry) {
		status = -ENOMEM;
		goto ice_create_pkt_fwd_rule_exit;
	}

	fm_entry->fltr_info = f_entry->fltr_info;

	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;

	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);

	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	}

	/* FW assigned the rule ID; record it in both the caller's entry and
	 * the tracking entry
	 */
	f_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	fm_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	l_type = fm_entry->fltr_info.lkup_type;
	recp = &hw->switch_info->recp_list[l_type];
	list_add(&fm_entry->list_entry, &recp->filt_rules);

ice_create_pkt_fwd_rule_exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_update_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_info: filter information for switch rule
 *
 * Call AQ command to update a previously created switch rule with a
 * VSI list ID
 */
static int
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	int status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;

	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);

	s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);

	/* Update switch rule with new rule set to forward VSI list */
	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_update_sw_rules, NULL);

	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_update_sw_rule_bridge_mode
 * @hw: pointer to the HW struct
 *
 * Updates unicast switch filter rules based on VEB/VEPA mode
 */
int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status = 0;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	mutex_lock(rule_lock);
	list_for_each_entry(fm_entry, rule_head, list_entry) {
		struct ice_fltr_info *fi = &fm_entry->fltr_info;
		u8 *addr = fi->l_data.mac.mac_addr;

		/* Update unicast Tx rules to reflect the selected
		 * VEB/VEPA mode
		 */
		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
		    (fi->fltr_act == ICE_FWD_TO_VSI ||
		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
		     fi->fltr_act == ICE_FWD_TO_Q ||
		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
			status = ice_update_pkt_fwd_rule(hw, fi);
			if (status)
				break;
		}
	}

	mutex_unlock(rule_lock);

	return status;
}

/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *	Update the previously created switch rule with the
 *	newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static int
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status = 0;

	/* queue-directed filters cannot be aggregated into VSI lists */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return -EOPNOTSUPP;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return -EEXIST;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		if (!m_entry->vsi_list_info)
			return -ENOMEM;

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		if (!m_entry->vsi_list_info)
			return -EIO;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}

/**
 * ice_find_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which the specified rule needs to be searched
 * @f_info: rule information
 *
 * Helper function to search for a given rule entry
 * Returns pointer to entry storing the rule if found
 */
static struct ice_fltr_mgmt_list_entry *
ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
{
	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		/* match on lookup data and flag; first match wins */
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->flag == list_itr->fltr_info.flag) {
			ret = list_itr;
			break;
		}
	}
	return ret;
}

/**
 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which VSI lists needs to be searched
 * @vsi_handle: VSI handle to be found in VSI list
 * @vsi_list_id: VSI list ID found containing vsi_handle
 *
 * Helper function to search a VSI list with single entry containing given VSI
 * handle element. This can be extended further to search VSI list with more
 * than 1 vsi_count. Returns pointer to VSI list entry if found.
 */
static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
			u16 *vsi_list_id)
{
	struct ice_vsi_list_map_info *map_info = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		/* only consider single-VSI lists */
		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
			map_info = list_itr->vsi_list_info;
			if (test_bit(vsi_handle, map_info->vsi_map)) {
				*vsi_list_id = map_info->vsi_list_id;
				return map_info;
			}
		}
	}
	return NULL;
}

/**
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type (recipe ID) for which rule has to be added
 * @f_entry: structure containing MAC forwarding information
 *
 * Adds or updates the rule lists for a given recipe
 */
static int
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
		      struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;

	mutex_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	/* Rx rules are sourced from the port, Tx rules from the VSI */
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = hw->port_info->lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;

	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
	if (!m_entry) {
		mutex_unlock(rule_lock);
		return ice_create_pkt_fwd_rule(hw, f_entry);
	}

	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
	mutex_unlock(rule_lock);

	return status;
}

/**
 * ice_remove_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @lkup_type: switch rule filter lookup type
 *
 * The VSI list should be emptied before this function is called to remove the
 * VSI list.
 */
static int
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	u16 s_rule_size;
	int status;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;

	/* NOTE(review): s_rule is populated below but does not appear to be
	 * submitted via ice_aq_sw_rules before being freed — verify whether
	 * the VSI_LIST_CLEAR rule is actually required here or is dead code.
	 */
	s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);

	/* Free the vsi_list resource that we allocated. It is assumed that the
	 * list is empty at this point.
	 */
	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
					    ice_aqc_opc_free_res);

	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	     be done
 *
 * Caller must ensure fm_list->vsi_list_info is non-NULL (as
 * ice_remove_rule_internal does) — it is dereferenced without a check below.
 */
static int
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id;
	int status = 0;

	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return -EINVAL;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return -ENOENT;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	/* for non-VLAN rules, a list with one remaining VSI is converted back
	 * to a direct FWD_TO_VSI rule
	 */
	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return -EIO;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}

/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @f_entry: rule entry containing filter information
 */
static int
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	bool remove_rule = false;
	u16 vsi_handle;
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
mutex_lock(rule_lock); 3174 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info); 3175 if (!list_elem) { 3176 status = -ENOENT; 3177 goto exit; 3178 } 3179 3180 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) { 3181 remove_rule = true; 3182 } else if (!list_elem->vsi_list_info) { 3183 status = -ENOENT; 3184 goto exit; 3185 } else if (list_elem->vsi_list_info->ref_cnt > 1) { 3186 /* a ref_cnt > 1 indicates that the vsi_list is being 3187 * shared by multiple rules. Decrement the ref_cnt and 3188 * remove this rule, but do not modify the list, as it 3189 * is in-use by other rules. 3190 */ 3191 list_elem->vsi_list_info->ref_cnt--; 3192 remove_rule = true; 3193 } else { 3194 /* a ref_cnt of 1 indicates the vsi_list is only used 3195 * by one rule. However, the original removal request is only 3196 * for a single VSI. Update the vsi_list first, and only 3197 * remove the rule if there are no further VSIs in this list. 3198 */ 3199 vsi_handle = f_entry->fltr_info.vsi_handle; 3200 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem); 3201 if (status) 3202 goto exit; 3203 /* if VSI count goes to zero after updating the VSI list */ 3204 if (list_elem->vsi_count == 0) 3205 remove_rule = true; 3206 } 3207 3208 if (remove_rule) { 3209 /* Remove the lookup rule */ 3210 struct ice_aqc_sw_rules_elem *s_rule; 3211 3212 s_rule = devm_kzalloc(ice_hw_to_dev(hw), 3213 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 3214 GFP_KERNEL); 3215 if (!s_rule) { 3216 status = -ENOMEM; 3217 goto exit; 3218 } 3219 3220 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule, 3221 ice_aqc_opc_remove_sw_rules); 3222 3223 status = ice_aq_sw_rules(hw, s_rule, 3224 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, 3225 ice_aqc_opc_remove_sw_rules, NULL); 3226 3227 /* Remove a book keeping from the list */ 3228 devm_kfree(ice_hw_to_dev(hw), s_rule); 3229 3230 if (status) 3231 goto exit; 3232 3233 list_del(&list_elem->list_entry); 3234 devm_kfree(ice_hw_to_dev(hw), list_elem); 3235 } 3236 exit: 3237 
mutex_unlock(rule_lock); 3238 return status; 3239 } 3240 3241 /** 3242 * ice_mac_fltr_exist - does this MAC filter exist for given VSI 3243 * @hw: pointer to the hardware structure 3244 * @mac: MAC address to be checked (for MAC filter) 3245 * @vsi_handle: check MAC filter for this VSI 3246 */ 3247 bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle) 3248 { 3249 struct ice_fltr_mgmt_list_entry *entry; 3250 struct list_head *rule_head; 3251 struct ice_switch_info *sw; 3252 struct mutex *rule_lock; /* Lock to protect filter rule list */ 3253 u16 hw_vsi_id; 3254 3255 if (!ice_is_vsi_valid(hw, vsi_handle)) 3256 return false; 3257 3258 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 3259 sw = hw->switch_info; 3260 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; 3261 if (!rule_head) 3262 return false; 3263 3264 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; 3265 mutex_lock(rule_lock); 3266 list_for_each_entry(entry, rule_head, list_entry) { 3267 struct ice_fltr_info *f_info = &entry->fltr_info; 3268 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0]; 3269 3270 if (is_zero_ether_addr(mac_addr)) 3271 continue; 3272 3273 if (f_info->flag != ICE_FLTR_TX || 3274 f_info->src_id != ICE_SRC_ID_VSI || 3275 f_info->lkup_type != ICE_SW_LKUP_MAC || 3276 f_info->fltr_act != ICE_FWD_TO_VSI || 3277 hw_vsi_id != f_info->fwd_id.hw_vsi_id) 3278 continue; 3279 3280 if (ether_addr_equal(mac, mac_addr)) { 3281 mutex_unlock(rule_lock); 3282 return true; 3283 } 3284 } 3285 mutex_unlock(rule_lock); 3286 return false; 3287 } 3288 3289 /** 3290 * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI 3291 * @hw: pointer to the hardware structure 3292 * @vlan_id: VLAN ID 3293 * @vsi_handle: check MAC filter for this VSI 3294 */ 3295 bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle) 3296 { 3297 struct ice_fltr_mgmt_list_entry *entry; 3298 struct list_head *rule_head; 3299 struct ice_switch_info *sw; 3300 struct mutex *rule_lock; /* 
Lock to protect filter rule list */
	u16 hw_vsi_id;

	/* VLAN ID is only 12 bits wide */
	if (vlan_id > ICE_MAX_VLAN_ID)
		return false;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return false;

	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	sw = hw->switch_info;
	rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	if (!rule_head)
		return false;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	list_for_each_entry(entry, rule_head, list_entry) {
		struct ice_fltr_info *f_info = &entry->fltr_info;
		u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
		struct ice_vsi_list_map_info *map_info;

		if (entry_vlan_id > ICE_MAX_VLAN_ID)
			continue;

		if (f_info->flag != ICE_FLTR_TX ||
		    f_info->src_id != ICE_SRC_ID_VSI ||
		    f_info->lkup_type != ICE_SW_LKUP_VLAN)
			continue;

		/* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
		if (f_info->fltr_act != ICE_FWD_TO_VSI &&
		    f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
			continue;

		if (f_info->fltr_act == ICE_FWD_TO_VSI) {
			if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
				continue;
		} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			/* If filter_action is FWD_TO_VSI_LIST, make sure
			 * that VSI being checked is part of VSI list
			 */
			if (entry->vsi_count == 1 &&
			    entry->vsi_list_info) {
				map_info = entry->vsi_list_info;
				if (!test_bit(vsi_handle, map_info->vsi_map))
					continue;
			}
		}

		if (vlan_id == entry_vlan_id) {
			mutex_unlock(rule_lock);
			return true;
		}
	}
	mutex_unlock(rule_lock);

	return false;
}

/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * Adds filters in two phases: multicast (and shared-unicast) addresses are
 * added one at a time via ice_add_rule_internal(); exclusive unicast
 * addresses are batched into a single buffer and programmed with bulk
 * add-switch-rule AQ commands.
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
 *
 * Return: 0 on success; -EINVAL for bad input; -EEXIST if an exclusive
 * unicast address is already programmed; -ENOMEM or an AQ error otherwise.
 *
 * NOTE(review): an error return part-way through the loops can leave
 * earlier entries already programmed -- callers appear to treat this as
 * all-or-nothing; confirm cleanup expectations at call sites.
 */
int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct list_head *rule_head;
	u16 total_elem_left, s_rule_size;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 num_unicast = 0;
	int status = 0;
	u8 elem_sent;

	if (!m_list || !hw)
		return -EINVAL;

	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	/* Phase 1: validate entries, add multicast/shared addresses directly,
	 * and count exclusive unicast addresses for the bulk phase.
	 */
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return -EINVAL;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return -EINVAL;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
			return -EINVAL;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			mutex_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return -EEXIST;
			}
			mutex_unlock(rule_lock);
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	mutex_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = 0;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule) {
		status = -ENOMEM;
		goto ice_add_mac_exit;
	}

	/* Phase 2: fill one rule element per unicast address */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_unicast_ether_addr(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		elem_sent = min_t(u8, total_elem_left,
				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule ID based on the value returned from FW */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (is_unicast_ether_addr(mac_addr)) {
			f_info->fltr_rule_id =
				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = -ENOMEM;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */

			list_add(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	mutex_unlock(rule_lock);
	if (s_rule)
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return -EINVAL;

	if (new_fltr->src_id !=
ICE_SRC_ID_VSI) 3541 return -EINVAL; 3542 3543 new_fltr->src = new_fltr->fwd_id.hw_vsi_id; 3544 lkup_type = new_fltr->lkup_type; 3545 vsi_handle = new_fltr->vsi_handle; 3546 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; 3547 mutex_lock(rule_lock); 3548 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr); 3549 if (!v_list_itr) { 3550 struct ice_vsi_list_map_info *map_info = NULL; 3551 3552 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) { 3553 /* All VLAN pruning rules use a VSI list. Check if 3554 * there is already a VSI list containing VSI that we 3555 * want to add. If found, use the same vsi_list_id for 3556 * this new VLAN rule or else create a new list. 3557 */ 3558 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN, 3559 vsi_handle, 3560 &vsi_list_id); 3561 if (!map_info) { 3562 status = ice_create_vsi_list_rule(hw, 3563 &vsi_handle, 3564 1, 3565 &vsi_list_id, 3566 lkup_type); 3567 if (status) 3568 goto exit; 3569 } 3570 /* Convert the action to forwarding to a VSI list. */ 3571 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST; 3572 new_fltr->fwd_id.vsi_list_id = vsi_list_id; 3573 } 3574 3575 status = ice_create_pkt_fwd_rule(hw, f_entry); 3576 if (!status) { 3577 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, 3578 new_fltr); 3579 if (!v_list_itr) { 3580 status = -ENOENT; 3581 goto exit; 3582 } 3583 /* reuse VSI list for new rule and increment ref_cnt */ 3584 if (map_info) { 3585 v_list_itr->vsi_list_info = map_info; 3586 map_info->ref_cnt++; 3587 } else { 3588 v_list_itr->vsi_list_info = 3589 ice_create_vsi_list_map(hw, &vsi_handle, 3590 1, vsi_list_id); 3591 } 3592 } 3593 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) { 3594 /* Update existing VSI list to add new VSI ID only if it used 3595 * by one VLAN rule. 
3596 */ 3597 cur_fltr = &v_list_itr->fltr_info; 3598 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr, 3599 new_fltr); 3600 } else { 3601 /* If VLAN rule exists and VSI list being used by this rule is 3602 * referenced by more than 1 VLAN rule. Then create a new VSI 3603 * list appending previous VSI with new VSI and update existing 3604 * VLAN rule to point to new VSI list ID 3605 */ 3606 struct ice_fltr_info tmp_fltr; 3607 u16 vsi_handle_arr[2]; 3608 u16 cur_handle; 3609 3610 /* Current implementation only supports reusing VSI list with 3611 * one VSI count. We should never hit below condition 3612 */ 3613 if (v_list_itr->vsi_count > 1 && 3614 v_list_itr->vsi_list_info->ref_cnt > 1) { 3615 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n"); 3616 status = -EIO; 3617 goto exit; 3618 } 3619 3620 cur_handle = 3621 find_first_bit(v_list_itr->vsi_list_info->vsi_map, 3622 ICE_MAX_VSI); 3623 3624 /* A rule already exists with the new VSI being added */ 3625 if (cur_handle == vsi_handle) { 3626 status = -EEXIST; 3627 goto exit; 3628 } 3629 3630 vsi_handle_arr[0] = cur_handle; 3631 vsi_handle_arr[1] = vsi_handle; 3632 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, 3633 &vsi_list_id, lkup_type); 3634 if (status) 3635 goto exit; 3636 3637 tmp_fltr = v_list_itr->fltr_info; 3638 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id; 3639 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; 3640 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; 3641 /* Update the previous switch rule to a new VSI list which 3642 * includes current VSI that is requested 3643 */ 3644 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); 3645 if (status) 3646 goto exit; 3647 3648 /* before overriding VSI list map info. 
decrement ref_cnt of 3649 * previous VSI list 3650 */ 3651 v_list_itr->vsi_list_info->ref_cnt--; 3652 3653 /* now update to newly created list */ 3654 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id; 3655 v_list_itr->vsi_list_info = 3656 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, 3657 vsi_list_id); 3658 v_list_itr->vsi_count++; 3659 } 3660 3661 exit: 3662 mutex_unlock(rule_lock); 3663 return status; 3664 } 3665 3666 /** 3667 * ice_add_vlan - Add VLAN based filter rule 3668 * @hw: pointer to the hardware structure 3669 * @v_list: list of VLAN entries and forwarding information 3670 */ 3671 int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) 3672 { 3673 struct ice_fltr_list_entry *v_list_itr; 3674 3675 if (!v_list || !hw) 3676 return -EINVAL; 3677 3678 list_for_each_entry(v_list_itr, v_list, list_entry) { 3679 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN) 3680 return -EINVAL; 3681 v_list_itr->fltr_info.flag = ICE_FLTR_TX; 3682 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr); 3683 if (v_list_itr->status) 3684 return v_list_itr->status; 3685 } 3686 return 0; 3687 } 3688 3689 /** 3690 * ice_add_eth_mac - Add ethertype and MAC based filter rule 3691 * @hw: pointer to the hardware structure 3692 * @em_list: list of ether type MAC filter, MAC is optional 3693 * 3694 * This function requires the caller to populate the entries in 3695 * the filter list with the necessary fields (including flags to 3696 * indicate Tx or Rx rules). 
3697 */ 3698 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) 3699 { 3700 struct ice_fltr_list_entry *em_list_itr; 3701 3702 if (!em_list || !hw) 3703 return -EINVAL; 3704 3705 list_for_each_entry(em_list_itr, em_list, list_entry) { 3706 enum ice_sw_lkup_type l_type = 3707 em_list_itr->fltr_info.lkup_type; 3708 3709 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && 3710 l_type != ICE_SW_LKUP_ETHERTYPE) 3711 return -EINVAL; 3712 3713 em_list_itr->status = ice_add_rule_internal(hw, l_type, 3714 em_list_itr); 3715 if (em_list_itr->status) 3716 return em_list_itr->status; 3717 } 3718 return 0; 3719 } 3720 3721 /** 3722 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule 3723 * @hw: pointer to the hardware structure 3724 * @em_list: list of ethertype or ethertype MAC entries 3725 */ 3726 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list) 3727 { 3728 struct ice_fltr_list_entry *em_list_itr, *tmp; 3729 3730 if (!em_list || !hw) 3731 return -EINVAL; 3732 3733 list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) { 3734 enum ice_sw_lkup_type l_type = 3735 em_list_itr->fltr_info.lkup_type; 3736 3737 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && 3738 l_type != ICE_SW_LKUP_ETHERTYPE) 3739 return -EINVAL; 3740 3741 em_list_itr->status = ice_remove_rule_internal(hw, l_type, 3742 em_list_itr); 3743 if (em_list_itr->status) 3744 return em_list_itr->status; 3745 } 3746 return 0; 3747 } 3748 3749 /** 3750 * ice_rem_sw_rule_info 3751 * @hw: pointer to the hardware structure 3752 * @rule_head: pointer to the switch list structure that we want to delete 3753 */ 3754 static void 3755 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head) 3756 { 3757 if (!list_empty(rule_head)) { 3758 struct ice_fltr_mgmt_list_entry *entry; 3759 struct ice_fltr_mgmt_list_entry *tmp; 3760 3761 list_for_each_entry_safe(entry, tmp, rule_head, list_entry) { 3762 list_del(&entry->list_entry); 3763 devm_kfree(ice_hw_to_dev(hw), entry); 3764 } 
3765 } 3766 } 3767 3768 /** 3769 * ice_rem_adv_rule_info 3770 * @hw: pointer to the hardware structure 3771 * @rule_head: pointer to the switch list structure that we want to delete 3772 */ 3773 static void 3774 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head) 3775 { 3776 struct ice_adv_fltr_mgmt_list_entry *tmp_entry; 3777 struct ice_adv_fltr_mgmt_list_entry *lst_itr; 3778 3779 if (list_empty(rule_head)) 3780 return; 3781 3782 list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) { 3783 list_del(&lst_itr->list_entry); 3784 devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups); 3785 devm_kfree(ice_hw_to_dev(hw), lst_itr); 3786 } 3787 } 3788 3789 /** 3790 * ice_cfg_dflt_vsi - change state of VSI to set/clear default 3791 * @hw: pointer to the hardware structure 3792 * @vsi_handle: VSI handle to set as default 3793 * @set: true to add the above mentioned switch rule, false to remove it 3794 * @direction: ICE_FLTR_RX or ICE_FLTR_TX 3795 * 3796 * add filter rule to set/unset given VSI as default VSI for the switch 3797 * (represented by swid) 3798 */ 3799 int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) 3800 { 3801 struct ice_aqc_sw_rules_elem *s_rule; 3802 struct ice_fltr_info f_info; 3803 enum ice_adminq_opc opcode; 3804 u16 s_rule_size; 3805 u16 hw_vsi_id; 3806 int status; 3807 3808 if (!ice_is_vsi_valid(hw, vsi_handle)) 3809 return -EINVAL; 3810 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 3811 3812 s_rule_size = set ? 
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
			    ICE_SW_RULE_RX_TX_NO_HDR_SIZE;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;

	memset(&f_info, 0, sizeof(f_info));

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	if (f_info.flag & ICE_FLTR_RX) {
		/* Rx default rules are sourced from the port, not a VSI */
		f_info.src = hw->port_info->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
		if (!set)
			/* removal needs the rule ID recorded at set time */
			f_info.fltr_rule_id =
				hw->port_info->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_tx_vsi_rule_id;
	}

	if (set)
		opcode = ice_aqc_opc_add_sw_rules;
	else
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		goto out;
	/* Record (or invalidate) the default-VSI bookkeeping in port_info so
	 * a later clear/set knows which rule ID and VSI are current.
	 */
	if (set) {
		u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_rx_vsi_rule_id = index;
		}
	} else {
		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
		}
	}

out:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which the specified rule needs to be searched
 * @f_info: rule information
 *
 * Helper function to search for a unicast rule entry - this is to be used
 * to remove unicast MAC filter that is not shared with other VSIs on the
 * PF switch.
 *
 * Returns pointer to entry storing the rule if found
 *
 * NOTE(review): matching is on l_data + hw_vsi_id + flag only; nothing here
 * verifies the address is actually unicast -- callers are expected to have
 * pre-filtered (see ice_remove_mac). Caller must hold the recipe's
 * filt_rule_lock.
 */
static struct ice_fltr_mgmt_list_entry *
ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
			  struct ice_fltr_info *f_info)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->fwd_id.hw_vsi_id ==
		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
		    f_info->flag == list_itr->fltr_info.flag)
			return list_itr;
	}
	return NULL;
}

/**
 * ice_remove_mac - remove a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * This function removes either a MAC filter rule or a specific VSI from a
 * VSI list for a multicast MAC address.
 *
 * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
 * be aware that this call will only work if all the entries passed into m_list
 * were added previously. It will not attempt to do a partial remove of entries
 * that were found.
3919 */ 3920 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) 3921 { 3922 struct ice_fltr_list_entry *list_itr, *tmp; 3923 struct mutex *rule_lock; /* Lock to protect filter rule list */ 3924 3925 if (!m_list) 3926 return -EINVAL; 3927 3928 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; 3929 list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { 3930 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type; 3931 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0]; 3932 u16 vsi_handle; 3933 3934 if (l_type != ICE_SW_LKUP_MAC) 3935 return -EINVAL; 3936 3937 vsi_handle = list_itr->fltr_info.vsi_handle; 3938 if (!ice_is_vsi_valid(hw, vsi_handle)) 3939 return -EINVAL; 3940 3941 list_itr->fltr_info.fwd_id.hw_vsi_id = 3942 ice_get_hw_vsi_num(hw, vsi_handle); 3943 if (is_unicast_ether_addr(add) && !hw->ucast_shared) { 3944 /* Don't remove the unicast address that belongs to 3945 * another VSI on the switch, since it is not being 3946 * shared... 
3947 */ 3948 mutex_lock(rule_lock); 3949 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC, 3950 &list_itr->fltr_info)) { 3951 mutex_unlock(rule_lock); 3952 return -ENOENT; 3953 } 3954 mutex_unlock(rule_lock); 3955 } 3956 list_itr->status = ice_remove_rule_internal(hw, 3957 ICE_SW_LKUP_MAC, 3958 list_itr); 3959 if (list_itr->status) 3960 return list_itr->status; 3961 } 3962 return 0; 3963 } 3964 3965 /** 3966 * ice_remove_vlan - Remove VLAN based filter rule 3967 * @hw: pointer to the hardware structure 3968 * @v_list: list of VLAN entries and forwarding information 3969 */ 3970 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) 3971 { 3972 struct ice_fltr_list_entry *v_list_itr, *tmp; 3973 3974 if (!v_list || !hw) 3975 return -EINVAL; 3976 3977 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { 3978 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; 3979 3980 if (l_type != ICE_SW_LKUP_VLAN) 3981 return -EINVAL; 3982 v_list_itr->status = ice_remove_rule_internal(hw, 3983 ICE_SW_LKUP_VLAN, 3984 v_list_itr); 3985 if (v_list_itr->status) 3986 return v_list_itr->status; 3987 } 3988 return 0; 3989 } 3990 3991 /** 3992 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter 3993 * @fm_entry: filter entry to inspect 3994 * @vsi_handle: VSI handle to compare with filter info 3995 */ 3996 static bool 3997 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) 3998 { 3999 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI && 4000 fm_entry->fltr_info.vsi_handle == vsi_handle) || 4001 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST && 4002 fm_entry->vsi_list_info && 4003 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map)))); 4004 } 4005 4006 /** 4007 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list 4008 * @hw: pointer to the hardware structure 4009 * @vsi_handle: VSI handle to remove filters from 4010 * @vsi_list_head: pointer to the list to add entry to 4011 * 
@fi: pointer to fltr_info of filter entry to copy & add
 *
 * Helper function, used when creating a list of filters to remove from
 * a specific VSI. The entry added to vsi_list_head is a COPY of the
 * original filter entry, with the exception of fltr_info.fltr_act and
 * fltr_info.fwd_id fields. These are set such that later logic can
 * extract which VSI to remove the fltr from, and pass on that information.
 */
static int
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			       struct list_head *vsi_list_head,
			       struct ice_fltr_info *fi)
{
	struct ice_fltr_list_entry *tmp;

	/* this memory is freed up in the caller function
	 * once filters for this VSI are removed
	 */
	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp->fltr_info = *fi;

	/* Overwrite these fields to indicate which VSI to remove filter from,
	 * so find and remove logic can extract the information from the
	 * list entries. Note that original entries will still have proper
	 * values.
	 */
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi_handle;
	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_add(&tmp->list_entry, vsi_list_head);

	return 0;
}

/**
 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup_list_head: pointer to the list that has certain lookup type filters
 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
 *
 * Locates all filters in lkup_list_head that are used by the given VSI,
 * and adds COPIES of those entries to vsi_list_head (intended to be used
 * to remove the listed filters).
 * Note that this means all entries in vsi_list_head must be explicitly
 * deallocated by the caller when done with list.
 */
static int
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			 struct list_head *lkup_list_head,
			 struct list_head *vsi_list_head)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	int status = 0;

	/* check to make sure VSI ID is valid and within boundary */
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							vsi_list_head,
							&fm_entry->fltr_info);
		if (status)
			return status;
	}
	return status;
}

/**
 * ice_determine_promisc_mask
 * @fi: filter info to parse
 *
 * Helper function to determine which ICE_PROMISC_ mask corresponds
 * to the given filter info.
 */
static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
{
	u16 vid = fi->l_data.mac_vlan.vlan_id;
	u8 *macaddr = fi->l_data.mac.mac_addr;
	bool is_tx_fltr = false;
	u8 promisc_mask = 0;

	if (fi->flag == ICE_FLTR_TX)
		is_tx_fltr = true;

	/* Classify by destination MAC, then OR in the VLAN bit if a VID
	 * is present on the filter.
	 */
	if (is_broadcast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
	else if (is_multicast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
	else if (is_unicast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
	if (vid)
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;

	return promisc_mask;
}

/**
 * ice_remove_promisc - Remove promisc based filter rules
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to be removed
 * @v_list: list of promisc entries
 */
static int
ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr, *tmp;

	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
		v_list_itr->status =
			ice_remove_rule_internal(hw, recp_id, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 */
int
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		      u16 vid)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct list_head remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head *rule_head;
	struct mutex *rule_lock;	/* Lock to protect filter rule list */
	int status = 0;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	/* Collect matching rules into a private list under the lock, then
	 * remove them after dropping it.
	 */
	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			mutex_unlock(rule_lock);
			goto free_fltr_list;
		}
	}
	mutex_unlock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}

	return status;
}

/**
 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 */
int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	bool is_tx_fltr;
	int status = 0;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&new_fltr, 0, sizeof(new_fltr));

	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			eth_broadcast_addr(mac_addr);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ether_addr_copy(mac_addr, dummy_eth_header);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1;	/* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = hw->port_info->lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
		if (status)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}

/**
 * ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 */
int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct list_head vsi_list_head;
	struct list_head *vlan_head;
	struct mutex *vlan_lock;	/* Lock to protect filter rule list */
	u16 vlan_id;
	int status;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	/* Snapshot the VLAN filters used by this VSI under the lock, then
	 * apply/clear promisc per VLAN without holding it.
	 */
	mutex_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	mutex_unlock(vlan_lock);
	if (status)
		goto free_fltr_list;

	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
		else
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);
		if (status)
			break;
	}

free_fltr_list:
	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
		list_del(&list_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_itr);
	}
	return status;
}

/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct list_head *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct mutex *rule_lock;	/* Lock to protect filter rule list */
	int status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	mutex_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	mutex_unlock(rule_lock);
	if (status)
		goto free_fltr_list;

	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_LAST:
	default:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
		break;
	}

free_fltr_list:
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}
}

/**
 * ice_remove_vsi_fltr - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 */
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
{
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
}

/**
 * ice_alloc_res_cntr - allocating resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries requested for FD resource type
 * @counter_id: counter index returned by AQ call
 */
int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
		   u16 *counter_id)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	u16 buf_len;
	int status;

	/* Allocate resource */
	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->num_elems = cpu_to_le16(num_items);
	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto exit;

	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);

exit:
	kfree(buf);
	return status;
}

/**
 * ice_free_res_cntr - free resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries to be freed for FD resource type
 * @counter_id: counter ID resource which needs to be freed
 */
int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
		  u16 counter_id)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	u16 buf_len;
	int status;

	/* Free resource */
	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->num_elems = cpu_to_le16(num_items);
	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);
	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");

	kfree(buf);
	return status;
}

/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_ETYPE_IL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 2, 0 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
};

/* Maps each software protocol type to the hardware protocol ID used when
 * programming recipes. Not const: ice_change_proto_id_to_dvm() patches the
 * outer-VLAN entry at runtime for double-VLAN mode.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_ETYPE_IL,		ICE_ETYPE_IL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
};

/**
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 * @tun_type: type of recipe tunnel
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 */
static u16
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
	      enum ice_sw_tunnel_type tun_type)
{
	bool refresh_required = true;
	struct ice_sw_recipe *recp;
	u8 i;

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,
						&refresh_required))
				continue;

		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)
			continue;

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
			struct ice_fv_word *be = lkup_exts->fv_words;
			u16 *cr = recp[i].lkup_exts.field_mask;
			u16 *de = lkup_exts->field_mask;
			bool found = true;
			u8 pe, qr;

			/* ar, cr, and qr are related to the recipe words,
			 * while be, de, and pe are related to the lookup
			 * words
			 */
			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
				     qr++) {
					if (ar[qr].off == be[pe].off &&
					    ar[qr].prot_id == be[pe].prot_id &&
					    cr[qr] == de[pe])
						/* Found the "pe"th word in
						 * the given recipe
						 */
						break;
				}
				/* After walking through all the words in the
				 * "i"th recipe if "pe"th word was not found
				 * then this recipe is not what we are looking
				 * for. So break out from this loop and try
				 * the next recipe
				 */
				if (qr >= recp[i].lkup_exts.n_val_words) {
					found = false;
					break;
				}
			}
			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 * Also tun type of recipe needs to be checked
			 */
			if (found && recp[i].tun_type == tun_type)
				return i; /* Return the recipe ID */
		}
	}
	return ICE_MAX_NUM_RECIPES;
}

/**
 * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
 *
 * As protocol id for outer vlan is different in dvm and svm, if dvm is
 * supported protocol array record for outer vlan has to be modified to
 * reflect the value proper for DVM.
 */
void ice_change_proto_id_to_dvm(void)
{
	u8 i;

	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
		if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
		    ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
			ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
}

/**
 * ice_prot_type_to_id - get protocol ID from protocol type
 * @type: protocol type
 * @id: pointer to variable that will receive the ID
 *
 * Returns true if found, false otherwise
 */
static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
{
	u8 i;

	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
		if (ice_prot_id_tbl[i].type == type) {
			*id = ice_prot_id_tbl[i].protocol_id;
			return true;
		}
	return false;
}

/**
 * ice_fill_valid_words - count valid words
 * @rule: advanced rule with lookup information
 * @lkup_exts: byte offset extractions of the words that are valid
 *
 * calculate valid words in a lookup rule using mask value
 */
static u8
ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
		     struct ice_prot_lkup_ext *lkup_exts)
{
	u8 j, word, prot_id, ret_val;

	if
 (!ice_prot_type_to_id(rule->type, &prot_id))
		return 0;

	word = lkup_exts->n_val_words;

	/* Append one extraction word per non-zero 16-bit mask word */
	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
		if (((u16 *)&rule->m_u)[j] &&
		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
			/* No more space to accommodate */
			if (word >= ICE_MAX_CHAIN_WORDS)
				return 0;
			lkup_exts->fv_words[word].off =
				ice_prot_ext[rule->type].offs[j];
			lkup_exts->fv_words[word].prot_id =
				ice_prot_id_tbl[rule->type].protocol_id;
			lkup_exts->field_mask[word] =
				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
			word++;
		}

	ret_val = word - lkup_exts->n_val_words;
	lkup_exts->n_val_words = word;

	return ret_val;
}

/**
 * ice_create_first_fit_recp_def - Create a recipe grouping
 * @hw: pointer to the hardware structure
 * @lkup_exts: an array of protocol header extractions
 * @rg_list: pointer to a list that stores new recipe groups
 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
 *
 * Using first fit algorithm, take all the words that are still not done
 * and start grouping them in 4-word groups. Each group makes up one
 * recipe.
 */
static int
ice_create_first_fit_recp_def(struct ice_hw *hw,
			      struct ice_prot_lkup_ext *lkup_exts,
			      struct list_head *rg_list,
			      u8 *recp_cnt)
{
	struct ice_pref_recipe_group *grp = NULL;
	u8 j;

	*recp_cnt = 0;

	/* Walk through every word in the rule to check if it is not done. If
	 * so then this word needs to be part of a new recipe.
	 */
	for (j = 0; j < lkup_exts->n_val_words; j++)
		if (!test_bit(j, lkup_exts->done)) {
			/* Open a new group when none exists or the current
			 * one is full (ICE_NUM_WORDS_RECIPE pairs).
			 */
			if (!grp ||
			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
				struct ice_recp_grp_entry *entry;

				entry = devm_kzalloc(ice_hw_to_dev(hw),
						     sizeof(*entry),
						     GFP_KERNEL);
				if (!entry)
					return -ENOMEM;
				list_add(&entry->l_entry, rg_list);
				grp = &entry->r_group;
				(*recp_cnt)++;
			}

			grp->pairs[grp->n_val_pairs].prot_id =
				lkup_exts->fv_words[j].prot_id;
			grp->pairs[grp->n_val_pairs].off =
				lkup_exts->fv_words[j].off;
			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
			grp->n_val_pairs++;
		}

	return 0;
}

/**
 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
 * @hw: pointer to the hardware structure
 * @fv_list: field vector with the extraction sequence information
 * @rg_list: recipe groupings with protocol-offset pairs
 *
 * Helper function to fill in the field vector indices for protocol-offset
 * pairs. These indexes are then ultimately programmed into a recipe.
 */
static int
ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
		       struct list_head *rg_list)
{
	struct ice_sw_fv_list_entry *fv;
	struct ice_recp_grp_entry *rg;
	struct ice_fv_word *fv_ext;

	if (list_empty(fv_list))
		return 0;

	fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
			      list_entry);
	fv_ext = fv->fv_ptr->ew;

	list_for_each_entry(rg, rg_list, l_entry) {
		u8 i;

		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
			struct ice_fv_word *pr;
			bool found = false;
			u16 mask;
			u8 j;

			pr = &rg->r_group.pairs[i];
			mask = rg->r_group.mask[i];

			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
				if (fv_ext[j].prot_id == pr->prot_id &&
				    fv_ext[j].off == pr->off) {
					found = true;

					/* Store index of field vector */
					rg->fv_idx[i] = j;
					rg->fv_mask[i] = mask;
					break;
				}

			/* Protocol/offset could not be found, caller gave an
			 * invalid pair
			 */
			if (!found)
				return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_find_free_recp_res_idx - find free result indexes for recipe
 * @hw: pointer to hardware structure
 * @profiles: bitmap of profiles that will be associated with the new recipe
 * @free_idx: pointer to variable to receive the free index bitmap
 *
 * The algorithm used here is:
 * 1. When creating a new recipe, create a set P which contains all
 *    Profiles that will be associated with our new recipe
 *
 * 2. For each Profile p in set P:
 *    a. Add all recipes associated with Profile p into set R
 *    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
 *       [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
 *       i. Or just assume they all have the same possible indexes:
 *          44, 45, 46, 47
 *          i.e., PossibleIndexes = 0x0000F00000000000
 *
 * 3. For each Recipe r in set R:
 *    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
 *    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
 *
 * FreeIndexes will contain the bits indicating the indexes free for use,
 * then the code needs to update the recipe[r].used_result_idx_bits to
 * indicate which indexes were selected for use by this recipe.
 */
static u16
ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
			   unsigned long *free_idx)
{
	DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
	DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
	DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
	u16 bit;

	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);

	bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);

	/* For each profile we are going to associate the recipe with, add the
	 * recipes that are associated with that profile. This will give us
	 * the set of recipes that our recipe may collide with. Also, determine
	 * what possible result indexes are usable given this set of profiles.
	 */
	for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
		bitmap_or(recipes, recipes, profile_to_recipe[bit],
			  ICE_MAX_NUM_RECIPES);
		bitmap_and(possible_idx, possible_idx,
			   hw->switch_info->prof_res_bm[bit],
			   ICE_MAX_FV_WORDS);
	}

	/* For each recipe that our new recipe may collide with, determine
	 * which indexes have been used.
	 */
	for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
		bitmap_or(used_idx, used_idx,
			  hw->switch_info->recp_list[bit].res_idxs,
			  ICE_MAX_FV_WORDS);

	bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);

	/* return number of free indexes */
	return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
}

/**
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @profiles: bitmap of profiles that will be associated.
 */
static int
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  unsigned long *profiles)
{
	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	u16 free_res_idx;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;
	int status;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
4930 */ 4931 /* check number of free result indices */ 4932 bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS); 4933 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm); 4934 4935 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n", 4936 free_res_idx, rm->n_grp_count); 4937 4938 if (rm->n_grp_count > 1) { 4939 if (rm->n_grp_count > free_res_idx) 4940 return -ENOSPC; 4941 4942 rm->n_grp_count++; 4943 } 4944 4945 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE) 4946 return -ENOSPC; 4947 4948 tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); 4949 if (!tmp) 4950 return -ENOMEM; 4951 4952 buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf), 4953 GFP_KERNEL); 4954 if (!buf) { 4955 status = -ENOMEM; 4956 goto err_mem; 4957 } 4958 4959 bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES); 4960 recipe_count = ICE_MAX_NUM_RECIPES; 4961 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC, 4962 NULL); 4963 if (status || recipe_count == 0) 4964 goto err_unroll; 4965 4966 /* Allocate the recipe resources, and configure them according to the 4967 * match fields from protocol headers and extracted field vectors. 4968 */ 4969 chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); 4970 list_for_each_entry(entry, &rm->rg_list, l_entry) { 4971 u8 i; 4972 4973 status = ice_alloc_recipe(hw, &entry->rid); 4974 if (status) 4975 goto err_unroll; 4976 4977 /* Clear the result index of the located recipe, as this will be 4978 * updated, if needed, later in the recipe creation process. 4979 */ 4980 tmp[0].content.result_indx = 0; 4981 4982 buf[recps] = tmp[0]; 4983 buf[recps].recipe_indx = (u8)entry->rid; 4984 /* if the recipe is a non-root recipe RID should be programmed 4985 * as 0 for the rules to be applied correctly. 4986 */ 4987 buf[recps].content.rid = 0; 4988 memset(&buf[recps].content.lkup_indx, 0, 4989 sizeof(buf[recps].content.lkup_indx)); 4990 4991 /* All recipes use look-up index 0 to match switch ID. 
*/ 4992 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; 4993 buf[recps].content.mask[0] = 4994 cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); 4995 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask 4996 * to be 0 4997 */ 4998 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { 4999 buf[recps].content.lkup_indx[i] = 0x80; 5000 buf[recps].content.mask[i] = 0; 5001 } 5002 5003 for (i = 0; i < entry->r_group.n_val_pairs; i++) { 5004 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i]; 5005 buf[recps].content.mask[i + 1] = 5006 cpu_to_le16(entry->fv_mask[i]); 5007 } 5008 5009 if (rm->n_grp_count > 1) { 5010 /* Checks to see if there really is a valid result index 5011 * that can be used. 5012 */ 5013 if (chain_idx >= ICE_MAX_FV_WORDS) { 5014 ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); 5015 status = -ENOSPC; 5016 goto err_unroll; 5017 } 5018 5019 entry->chain_idx = chain_idx; 5020 buf[recps].content.result_indx = 5021 ICE_AQ_RECIPE_RESULT_EN | 5022 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) & 5023 ICE_AQ_RECIPE_RESULT_DATA_M); 5024 clear_bit(chain_idx, result_idx_bm); 5025 chain_idx = find_first_bit(result_idx_bm, 5026 ICE_MAX_FV_WORDS); 5027 } 5028 5029 /* fill recipe dependencies */ 5030 bitmap_zero((unsigned long *)buf[recps].recipe_bitmap, 5031 ICE_MAX_NUM_RECIPES); 5032 set_bit(buf[recps].recipe_indx, 5033 (unsigned long *)buf[recps].recipe_bitmap); 5034 buf[recps].content.act_ctrl_fwd_priority = rm->priority; 5035 recps++; 5036 } 5037 5038 if (rm->n_grp_count == 1) { 5039 rm->root_rid = buf[0].recipe_indx; 5040 set_bit(buf[0].recipe_indx, rm->r_bitmap); 5041 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT; 5042 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) { 5043 memcpy(buf[0].recipe_bitmap, rm->r_bitmap, 5044 sizeof(buf[0].recipe_bitmap)); 5045 } else { 5046 status = -EINVAL; 5047 goto err_unroll; 5048 } 5049 /* Applicable only for ROOT_RECIPE, set the fwd_priority for 5050 * the recipe which is getting created if 
specified 5051 * by user. Usually any advanced switch filter, which results 5052 * into new extraction sequence, ended up creating a new recipe 5053 * of type ROOT and usually recipes are associated with profiles 5054 * Switch rule referreing newly created recipe, needs to have 5055 * either/or 'fwd' or 'join' priority, otherwise switch rule 5056 * evaluation will not happen correctly. In other words, if 5057 * switch rule to be evaluated on priority basis, then recipe 5058 * needs to have priority, otherwise it will be evaluated last. 5059 */ 5060 buf[0].content.act_ctrl_fwd_priority = rm->priority; 5061 } else { 5062 struct ice_recp_grp_entry *last_chain_entry; 5063 u16 rid, i; 5064 5065 /* Allocate the last recipe that will chain the outcomes of the 5066 * other recipes together 5067 */ 5068 status = ice_alloc_recipe(hw, &rid); 5069 if (status) 5070 goto err_unroll; 5071 5072 buf[recps].recipe_indx = (u8)rid; 5073 buf[recps].content.rid = (u8)rid; 5074 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT; 5075 /* the new entry created should also be part of rg_list to 5076 * make sure we have complete recipe 5077 */ 5078 last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw), 5079 sizeof(*last_chain_entry), 5080 GFP_KERNEL); 5081 if (!last_chain_entry) { 5082 status = -ENOMEM; 5083 goto err_unroll; 5084 } 5085 last_chain_entry->rid = rid; 5086 memset(&buf[recps].content.lkup_indx, 0, 5087 sizeof(buf[recps].content.lkup_indx)); 5088 /* All recipes use look-up index 0 to match switch ID. 
*/ 5089 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; 5090 buf[recps].content.mask[0] = 5091 cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); 5092 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { 5093 buf[recps].content.lkup_indx[i] = 5094 ICE_AQ_RECIPE_LKUP_IGNORE; 5095 buf[recps].content.mask[i] = 0; 5096 } 5097 5098 i = 1; 5099 /* update r_bitmap with the recp that is used for chaining */ 5100 set_bit(rid, rm->r_bitmap); 5101 /* this is the recipe that chains all the other recipes so it 5102 * should not have a chaining ID to indicate the same 5103 */ 5104 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND; 5105 list_for_each_entry(entry, &rm->rg_list, l_entry) { 5106 last_chain_entry->fv_idx[i] = entry->chain_idx; 5107 buf[recps].content.lkup_indx[i] = entry->chain_idx; 5108 buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF); 5109 set_bit(entry->rid, rm->r_bitmap); 5110 } 5111 list_add(&last_chain_entry->l_entry, &rm->rg_list); 5112 if (sizeof(buf[recps].recipe_bitmap) >= 5113 sizeof(rm->r_bitmap)) { 5114 memcpy(buf[recps].recipe_bitmap, rm->r_bitmap, 5115 sizeof(buf[recps].recipe_bitmap)); 5116 } else { 5117 status = -EINVAL; 5118 goto err_unroll; 5119 } 5120 buf[recps].content.act_ctrl_fwd_priority = rm->priority; 5121 5122 recps++; 5123 rm->root_rid = (u8)rid; 5124 } 5125 status = ice_acquire_change_lock(hw, ICE_RES_WRITE); 5126 if (status) 5127 goto err_unroll; 5128 5129 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL); 5130 ice_release_change_lock(hw); 5131 if (status) 5132 goto err_unroll; 5133 5134 /* Every recipe that just got created add it to the recipe 5135 * book keeping list 5136 */ 5137 list_for_each_entry(entry, &rm->rg_list, l_entry) { 5138 struct ice_switch_info *sw = hw->switch_info; 5139 bool is_root, idx_found = false; 5140 struct ice_sw_recipe *recp; 5141 u16 idx, buf_idx = 0; 5142 5143 /* find buffer index for copying some data */ 5144 for (idx = 0; idx < rm->n_grp_count; idx++) 5145 if (buf[idx].recipe_indx == entry->rid) { 5146 
buf_idx = idx; 5147 idx_found = true; 5148 } 5149 5150 if (!idx_found) { 5151 status = -EIO; 5152 goto err_unroll; 5153 } 5154 5155 recp = &sw->recp_list[entry->rid]; 5156 is_root = (rm->root_rid == entry->rid); 5157 recp->is_root = is_root; 5158 5159 recp->root_rid = entry->rid; 5160 recp->big_recp = (is_root && rm->n_grp_count > 1); 5161 5162 memcpy(&recp->ext_words, entry->r_group.pairs, 5163 entry->r_group.n_val_pairs * sizeof(struct ice_fv_word)); 5164 5165 memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap, 5166 sizeof(recp->r_bitmap)); 5167 5168 /* Copy non-result fv index values and masks to recipe. This 5169 * call will also update the result recipe bitmask. 5170 */ 5171 ice_collect_result_idx(&buf[buf_idx], recp); 5172 5173 /* for non-root recipes, also copy to the root, this allows 5174 * easier matching of a complete chained recipe 5175 */ 5176 if (!is_root) 5177 ice_collect_result_idx(&buf[buf_idx], 5178 &sw->recp_list[rm->root_rid]); 5179 5180 recp->n_ext_words = entry->r_group.n_val_pairs; 5181 recp->chain_idx = entry->chain_idx; 5182 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority; 5183 recp->n_grp_count = rm->n_grp_count; 5184 recp->tun_type = rm->tun_type; 5185 recp->recp_created = true; 5186 } 5187 rm->root_buf = buf; 5188 kfree(tmp); 5189 return status; 5190 5191 err_unroll: 5192 err_mem: 5193 kfree(tmp); 5194 devm_kfree(ice_hw_to_dev(hw), buf); 5195 return status; 5196 } 5197 5198 /** 5199 * ice_create_recipe_group - creates recipe group 5200 * @hw: pointer to hardware structure 5201 * @rm: recipe management list entry 5202 * @lkup_exts: lookup elements 5203 */ 5204 static int 5205 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, 5206 struct ice_prot_lkup_ext *lkup_exts) 5207 { 5208 u8 recp_count = 0; 5209 int status; 5210 5211 rm->n_grp_count = 0; 5212 5213 /* Create recipes for words that are marked not done by packing them 5214 * as best fit. 
	 */
	status = ice_create_first_fit_recp_def(hw, lkup_exts,
					       &rm->rg_list, &recp_count);
	if (!status) {
		/* cache the grouping results on the recipe entry */
		rm->n_grp_count += recp_count;
		rm->n_ext_words = lkup_exts->n_val_words;
		memcpy(&rm->ext_words, lkup_exts->fv_words,
		       sizeof(rm->ext_words));
		memcpy(rm->word_masks, lkup_exts->field_mask,
		       sizeof(rm->word_masks));
	}

	return status;
}

/**
 * ice_tun_type_match_word - determine if tun type needs a match mask
 * @tun_type: tunnel type
 * @mask: mask to be used for the tunnel
 *
 * Returns true and writes the tunnel-flag match mask to @mask for tunnel
 * types that require matching the metadata tunnel bit; otherwise writes 0
 * and returns false.
 */
static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
{
	switch (tun_type) {
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_VXLAN:
	case ICE_SW_TUN_NVGRE:
	case ICE_SW_TUN_GTPU:
	case ICE_SW_TUN_GTPC:
		*mask = ICE_TUN_FLAG_MASK;
		return true;

	default:
		*mask = 0;
		return false;
	}
}

/**
 * ice_add_special_words - Add words that are not protocols, such as metadata
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @lkup_exts: lookup word structure
 */
static int
ice_add_special_words(struct ice_adv_rule_info *rinfo,
		      struct ice_prot_lkup_ext *lkup_exts)
{
	u16 mask;

	/* If this is a tunneled packet, then add recipe index to match the
	 * tunnel bit in the packet metadata flags.
	 */
	if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
			/* append one metadata word for the tunnel flag */
			u8 word = lkup_exts->n_val_words++;

			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
			lkup_exts->field_mask[word] = mask;
		} else {
			/* no room left for another lookup word */
			return -ENOSPC;
		}
	}

	return 0;
}

/**
 * ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
 * @hw: pointer to hardware structure
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @bm: pointer to memory for returning the bitmap of field vectors
 *
 * Maps the rule's tunnel type to a profile type and collects the bitmap of
 * field vectors (profiles) compatible with it.
 */
static void
ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
			 unsigned long *bm)
{
	enum ice_prof_type prof_type;

	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);

	switch (rinfo->tun_type) {
	case ICE_NON_TUN:
		prof_type = ICE_PROF_NON_TUN;
		break;
	case ICE_ALL_TUNNELS:
		prof_type = ICE_PROF_TUN_ALL;
		break;
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_VXLAN:
		prof_type = ICE_PROF_TUN_UDP;
		break;
	case ICE_SW_TUN_NVGRE:
		prof_type = ICE_PROF_TUN_GRE;
		break;
	case ICE_SW_TUN_GTPU:
		prof_type = ICE_PROF_TUN_GTPU;
		break;
	case ICE_SW_TUN_GTPC:
		prof_type = ICE_PROF_TUN_GTPC;
		break;
	case ICE_SW_TUN_AND_NON_TUN:
	default:
		prof_type = ICE_PROF_ALL;
		break;
	}

	ice_get_sw_fv_bitmap(hw, prof_type, bm);
}

/**
 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @rinfo: other information regarding the rule e.g.
priority and action info 5330 * @rid: return the recipe ID of the recipe created 5331 */ 5332 static int 5333 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, 5334 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid) 5335 { 5336 DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES); 5337 DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES); 5338 struct ice_prot_lkup_ext *lkup_exts; 5339 struct ice_recp_grp_entry *r_entry; 5340 struct ice_sw_fv_list_entry *fvit; 5341 struct ice_recp_grp_entry *r_tmp; 5342 struct ice_sw_fv_list_entry *tmp; 5343 struct ice_sw_recipe *rm; 5344 int status = 0; 5345 u8 i; 5346 5347 if (!lkups_cnt) 5348 return -EINVAL; 5349 5350 lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL); 5351 if (!lkup_exts) 5352 return -ENOMEM; 5353 5354 /* Determine the number of words to be matched and if it exceeds a 5355 * recipe's restrictions 5356 */ 5357 for (i = 0; i < lkups_cnt; i++) { 5358 u16 count; 5359 5360 if (lkups[i].type >= ICE_PROTOCOL_LAST) { 5361 status = -EIO; 5362 goto err_free_lkup_exts; 5363 } 5364 5365 count = ice_fill_valid_words(&lkups[i], lkup_exts); 5366 if (!count) { 5367 status = -EIO; 5368 goto err_free_lkup_exts; 5369 } 5370 } 5371 5372 rm = kzalloc(sizeof(*rm), GFP_KERNEL); 5373 if (!rm) { 5374 status = -ENOMEM; 5375 goto err_free_lkup_exts; 5376 } 5377 5378 /* Get field vectors that contain fields extracted from all the protocol 5379 * headers being programmed. 5380 */ 5381 INIT_LIST_HEAD(&rm->fv_list); 5382 INIT_LIST_HEAD(&rm->rg_list); 5383 5384 /* Get bitmap of field vectors (profiles) that are compatible with the 5385 * rule request; only these will be searched in the subsequent call to 5386 * ice_get_sw_fv_list. 
5387 */ 5388 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap); 5389 5390 status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list); 5391 if (status) 5392 goto err_unroll; 5393 5394 /* Create any special protocol/offset pairs, such as looking at tunnel 5395 * bits by extracting metadata 5396 */ 5397 status = ice_add_special_words(rinfo, lkup_exts); 5398 if (status) 5399 goto err_free_lkup_exts; 5400 5401 /* Group match words into recipes using preferred recipe grouping 5402 * criteria. 5403 */ 5404 status = ice_create_recipe_group(hw, rm, lkup_exts); 5405 if (status) 5406 goto err_unroll; 5407 5408 /* set the recipe priority if specified */ 5409 rm->priority = (u8)rinfo->priority; 5410 5411 /* Find offsets from the field vector. Pick the first one for all the 5412 * recipes. 5413 */ 5414 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list); 5415 if (status) 5416 goto err_unroll; 5417 5418 /* get bitmap of all profiles the recipe will be associated with */ 5419 bitmap_zero(profiles, ICE_MAX_NUM_PROFILES); 5420 list_for_each_entry(fvit, &rm->fv_list, list_entry) { 5421 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id); 5422 set_bit((u16)fvit->profile_id, profiles); 5423 } 5424 5425 /* Look for a recipe which matches our requested fv / mask list */ 5426 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type); 5427 if (*rid < ICE_MAX_NUM_RECIPES) 5428 /* Success if found a recipe that match the existing criteria */ 5429 goto err_unroll; 5430 5431 rm->tun_type = rinfo->tun_type; 5432 /* Recipe we need does not exist, add a recipe */ 5433 status = ice_add_sw_recipe(hw, rm, profiles); 5434 if (status) 5435 goto err_unroll; 5436 5437 /* Associate all the recipes created with all the profiles in the 5438 * common field vector. 
5439 */ 5440 list_for_each_entry(fvit, &rm->fv_list, list_entry) { 5441 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); 5442 u16 j; 5443 5444 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id, 5445 (u8 *)r_bitmap, NULL); 5446 if (status) 5447 goto err_unroll; 5448 5449 bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap, 5450 ICE_MAX_NUM_RECIPES); 5451 status = ice_acquire_change_lock(hw, ICE_RES_WRITE); 5452 if (status) 5453 goto err_unroll; 5454 5455 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id, 5456 (u8 *)r_bitmap, 5457 NULL); 5458 ice_release_change_lock(hw); 5459 5460 if (status) 5461 goto err_unroll; 5462 5463 /* Update profile to recipe bitmap array */ 5464 bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap, 5465 ICE_MAX_NUM_RECIPES); 5466 5467 /* Update recipe to profile bitmap array */ 5468 for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES) 5469 set_bit((u16)fvit->profile_id, recipe_to_profile[j]); 5470 } 5471 5472 *rid = rm->root_rid; 5473 memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts, 5474 sizeof(*lkup_exts)); 5475 err_unroll: 5476 list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) { 5477 list_del(&r_entry->l_entry); 5478 devm_kfree(ice_hw_to_dev(hw), r_entry); 5479 } 5480 5481 list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) { 5482 list_del(&fvit->list_entry); 5483 devm_kfree(ice_hw_to_dev(hw), fvit); 5484 } 5485 5486 if (rm->root_buf) 5487 devm_kfree(ice_hw_to_dev(hw), rm->root_buf); 5488 5489 kfree(rm); 5490 5491 err_free_lkup_exts: 5492 kfree(lkup_exts); 5493 5494 return status; 5495 } 5496 5497 /** 5498 * ice_find_dummy_packet - find dummy packet 5499 * 5500 * @lkups: lookup elements or match criteria for the advanced recipe, one 5501 * structure per protocol header 5502 * @lkups_cnt: number of protocols 5503 * @tun_type: tunnel type 5504 * @pkt: dummy packet to fill according to filter match criteria 5505 * @pkt_len: packet length of dummy packet 5506 * @offsets: pointer to receive 
 *	   the pointer to the offsets for the packet
 *
 * Inspects the lookup list for inner/outer L3-L4 protocols, VLAN, and GTP
 * hints, then selects the matching pre-built dummy packet template, its
 * length and its protocol-offset table. Tunnel types are checked in a fixed
 * precedence order (GTPU, GTPC, NVGRE, VXLAN/GENEVE) before the plain
 * UDP/TCP/VLAN fallbacks — this ordering is load-bearing.
 */
static void
ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
		      enum ice_sw_tunnel_type tun_type,
		      const u8 **pkt, u16 *pkt_len,
		      const struct ice_dummy_pkt_offsets **offsets)
{
	bool inner_tcp = false, inner_udp = false, outer_ipv6 = false;
	bool vlan = false, inner_ipv6 = false, gtp_no_pay = false;
	u16 i;

	/* Scan the lookups once and record which protocols appear; an
	 * outer/inner EtherType of 0x86DD (fully masked) also implies IPv6.
	 */
	for (i = 0; i < lkups_cnt; i++) {
		if (lkups[i].type == ICE_UDP_ILOS)
			inner_udp = true;
		else if (lkups[i].type == ICE_TCP_IL)
			inner_tcp = true;
		else if (lkups[i].type == ICE_IPV6_OFOS)
			outer_ipv6 = true;
		else if (lkups[i].type == ICE_VLAN_OFOS)
			vlan = true;
		else if (lkups[i].type == ICE_ETYPE_OL &&
			 lkups[i].h_u.ethertype.ethtype_id ==
				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
			 lkups[i].m_u.ethertype.ethtype_id ==
				cpu_to_be16(0xFFFF))
			outer_ipv6 = true;
		else if (lkups[i].type == ICE_ETYPE_IL &&
			 lkups[i].h_u.ethertype.ethtype_id ==
				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
			 lkups[i].m_u.ethertype.ethtype_id ==
				cpu_to_be16(0xFFFF))
			inner_ipv6 = true;
		else if (lkups[i].type == ICE_IPV6_IL)
			inner_ipv6 = true;
		else if (lkups[i].type == ICE_GTP_NO_PAY)
			gtp_no_pay = true;
	}

	if (tun_type == ICE_SW_TUN_GTPU) {
		if (outer_ipv6) {
			if (gtp_no_pay) {
				*pkt = dummy_ipv6_gtp_packet;
				*pkt_len = sizeof(dummy_ipv6_gtp_packet);
				*offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
			} else if (inner_ipv6) {
				if (inner_udp) {
					*pkt = dummy_ipv6_gtpu_ipv6_udp_packet;
					*pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet);
					*offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets;
				} else {
					/* TCP is the default inner L4 */
					*pkt = dummy_ipv6_gtpu_ipv6_tcp_packet;
					*pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet);
					*offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets;
				}
			} else {
				if (inner_udp) {
					*pkt = dummy_ipv6_gtpu_ipv4_udp_packet;
					*pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet);
					*offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets;
				} else {
					*pkt = dummy_ipv6_gtpu_ipv4_tcp_packet;
					*pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet);
					*offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets;
				}
			}
		} else {
			if (gtp_no_pay) {
				*pkt = dummy_ipv4_gtpu_ipv4_packet;
				*pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
				*offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
			} else if (inner_ipv6) {
				if (inner_udp) {
					*pkt = dummy_ipv4_gtpu_ipv6_udp_packet;
					*pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet);
					*offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets;
				} else {
					*pkt = dummy_ipv4_gtpu_ipv6_tcp_packet;
					*pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet);
					*offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets;
				}
			} else {
				if (inner_udp) {
					*pkt = dummy_ipv4_gtpu_ipv4_udp_packet;
					*pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet);
					*offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets;
				} else {
					*pkt = dummy_ipv4_gtpu_ipv4_tcp_packet;
					*pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet);
					*offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets;
				}
			}
		}
		return;
	}

	if (tun_type == ICE_SW_TUN_GTPC) {
		if (outer_ipv6) {
			*pkt = dummy_ipv6_gtp_packet;
			*pkt_len = sizeof(dummy_ipv6_gtp_packet);
			*offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
		} else {
			*pkt = dummy_ipv4_gtpu_ipv4_packet;
			*pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
			*offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
		}
		return;
	}

	if (tun_type == ICE_SW_TUN_NVGRE) {
		if (inner_tcp && inner_ipv6) {
			*pkt = dummy_gre_ipv6_tcp_packet;
			*pkt_len = sizeof(dummy_gre_ipv6_tcp_packet);
			*offsets = dummy_gre_ipv6_tcp_packet_offsets;
			return;
		}
		if (inner_tcp) {
			*pkt = dummy_gre_tcp_packet;
			*pkt_len = sizeof(dummy_gre_tcp_packet);
			*offsets = dummy_gre_tcp_packet_offsets;
			return;
		}
		if (inner_ipv6) {
			*pkt = dummy_gre_ipv6_udp_packet;
			*pkt_len = sizeof(dummy_gre_ipv6_udp_packet);
			*offsets = dummy_gre_ipv6_udp_packet_offsets;
			return;
		}
		/* default: IPv4 inner with UDP */
		*pkt = dummy_gre_udp_packet;
		*pkt_len = sizeof(dummy_gre_udp_packet);
		*offsets = dummy_gre_udp_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_VXLAN ||
	    tun_type == ICE_SW_TUN_GENEVE) {
		if (inner_tcp && inner_ipv6) {
			*pkt = dummy_udp_tun_ipv6_tcp_packet;
			*pkt_len = sizeof(dummy_udp_tun_ipv6_tcp_packet);
			*offsets = dummy_udp_tun_ipv6_tcp_packet_offsets;
			return;
		}
		if (inner_tcp) {
			*pkt = dummy_udp_tun_tcp_packet;
			*pkt_len = sizeof(dummy_udp_tun_tcp_packet);
			*offsets = dummy_udp_tun_tcp_packet_offsets;
			return;
		}
		if (inner_ipv6) {
			*pkt = dummy_udp_tun_ipv6_udp_packet;
			*pkt_len = sizeof(dummy_udp_tun_ipv6_udp_packet);
			*offsets = dummy_udp_tun_ipv6_udp_packet_offsets;
			return;
		}
		*pkt = dummy_udp_tun_udp_packet;
		*pkt_len = sizeof(dummy_udp_tun_udp_packet);
		*offsets = dummy_udp_tun_udp_packet_offsets;
		return;
	}

	/* non-tunneled: choose by inner L4, outer IP version and VLAN */
	if (inner_udp && !outer_ipv6) {
		if (vlan) {
			*pkt = dummy_vlan_udp_packet;
			*pkt_len = sizeof(dummy_vlan_udp_packet);
			*offsets = dummy_vlan_udp_packet_offsets;
			return;
		}
		*pkt = dummy_udp_packet;
		*pkt_len = sizeof(dummy_udp_packet);
		*offsets = dummy_udp_packet_offsets;
		return;
	} else if (inner_udp && outer_ipv6) {
		if (vlan) {
			*pkt = dummy_vlan_udp_ipv6_packet;
			*pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
			*offsets = dummy_vlan_udp_ipv6_packet_offsets;
			return;
		}
		*pkt = dummy_udp_ipv6_packet;
		*pkt_len = sizeof(dummy_udp_ipv6_packet);
		*offsets = dummy_udp_ipv6_packet_offsets;
		return;
	} else if ((inner_tcp && outer_ipv6) || outer_ipv6) {
		if (vlan) {
			*pkt = dummy_vlan_tcp_ipv6_packet;
			*pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
			*offsets = dummy_vlan_tcp_ipv6_packet_offsets;
			return;
		}
		*pkt = dummy_tcp_ipv6_packet;
		*pkt_len = sizeof(dummy_tcp_ipv6_packet);
		*offsets = dummy_tcp_ipv6_packet_offsets;
		return;
	}

	/* final fallback: IPv4 TCP, with or without VLAN */
	if (vlan) {
		*pkt = dummy_vlan_tcp_packet;
		*pkt_len = sizeof(dummy_vlan_tcp_packet);
		*offsets = dummy_vlan_tcp_packet_offsets;
	} else {
		*pkt = dummy_tcp_packet;
		*pkt_len = sizeof(dummy_tcp_packet);
		*offsets = dummy_tcp_packet_offsets;
	}
}

/**
 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
 *
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @s_rule: stores rule information from the match criteria
 * @dummy_pkt: dummy packet to fill according to filter match criteria
 * @pkt_len: packet length of dummy packet
 * @offsets: offset info for the dummy packet
 */
static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
			  struct ice_aqc_sw_rules_elem *s_rule,
			  const u8 *dummy_pkt, u16 pkt_len,
			  const struct ice_dummy_pkt_offsets *offsets)
{
	u8 *pkt;
	u16 i;

	/* Start with a packet with a pre-defined/dummy content. Then, fill
	 * in the header values to be looked up or matched.
	 */
	pkt = s_rule->pdata.lkup_tx_rx.hdr;

	memcpy(pkt, dummy_pkt, pkt_len);

	for (i = 0; i < lkups_cnt; i++) {
		enum ice_protocol_type type;
		u16 offset = 0, len = 0, j;
		bool found = false;

		/* find the start of this layer; it should be found since this
		 * was already checked when searching for the dummy packet
		 */
		type = lkups[i].type;
		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
			if (type == offsets[j].type) {
				offset = offsets[j].offset;
				found = true;
				break;
			}
		}
		/* this should never happen in a correct calling sequence */
		if (!found)
			return -EINVAL;

		/* header length (in bytes) for this protocol layer */
		switch (lkups[i].type) {
		case ICE_MAC_OFOS:
		case ICE_MAC_IL:
			len = sizeof(struct ice_ether_hdr);
			break;
		case ICE_ETYPE_OL:
		case ICE_ETYPE_IL:
			len = sizeof(struct ice_ethtype_hdr);
			break;
		case ICE_VLAN_OFOS:
			len = sizeof(struct ice_vlan_hdr);
			break;
		case ICE_IPV4_OFOS:
		case ICE_IPV4_IL:
			len = sizeof(struct ice_ipv4_hdr);
			break;
		case ICE_IPV6_OFOS:
		case ICE_IPV6_IL:
			len = sizeof(struct ice_ipv6_hdr);
			break;
		case ICE_TCP_IL:
		case ICE_UDP_OF:
		case ICE_UDP_ILOS:
			len = sizeof(struct ice_l4_hdr);
			break;
		case ICE_SCTP_IL:
			len = sizeof(struct ice_sctp_hdr);
			break;
		case ICE_NVGRE:
			len = sizeof(struct ice_nvgre_hdr);
			break;
		case ICE_VXLAN:
		case ICE_GENEVE:
			len = sizeof(struct ice_udp_tnl_hdr);
			break;
		case ICE_GTP_NO_PAY:
		case ICE_GTP:
			len = sizeof(struct ice_udp_gtp_hdr);
			break;
		default:
			/* unsupported protocol for packet rewriting */
			return -EINVAL;
		}

		/* the length should be a word multiple */
		if (len % ICE_BYTES_PER_WORD)
			return -EIO;

		/* We have the offset to the header start, the length, the
		 * caller's header values and mask.
		 * Use this information to
		 * copy the data into the dummy packet appropriately based on
		 * the mask. Note that we need to only write the bits as
		 * indicated by the mask to make sure we don't improperly write
		 * over any significant packet data.
		 */
		/* NOTE(review): the u16-granular masked write below assumes
		 * the header value/mask unions are laid out as arrays of
		 * 16-bit words at even offsets within the packet — confirm
		 * alignment expectations on all supported platforms.
		 */
		for (j = 0; j < len / sizeof(u16); j++)
			if (((u16 *)&lkups[i].m_u)[j])
				((u16 *)(pkt + offset))[j] =
					(((u16 *)(pkt + offset))[j] &
					 ~((u16 *)&lkups[i].m_u)[j]) |
					(((u16 *)&lkups[i].h_u)[j] &
					 ((u16 *)&lkups[i].m_u)[j]);
	}

	s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);

	return 0;
}

/**
 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
 * @hw: pointer to the hardware structure
 * @tun_type: tunnel type
 * @pkt: dummy packet to fill in
 * @offsets: offset info for the dummy packet
 *
 * For VXLAN/GENEVE rules, looks up the currently open tunnel UDP port and
 * writes it into the outer UDP header of the dummy packet. Other tunnel
 * types need no fix-up.
 *
 * Return: 0 on success or when nothing needs to be done, -EIO if no open
 * tunnel port exists or the packet has no outer UDP header.
 */
static int
ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
{
	u16 open_port, i;

	switch (tun_type) {
	case ICE_SW_TUN_VXLAN:
		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
			return -EIO;
		break;
	case ICE_SW_TUN_GENEVE:
		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
			return -EIO;
		break;
	default:
		/* Nothing needs to be done for this tunnel type */
		return 0;
	}

	/* Find the outer UDP protocol header and insert the port number */
	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
		if (offsets[i].type == ICE_UDP_OF) {
			struct ice_l4_hdr *hdr;
			u16 offset;

			offset = offsets[i].offset;
			hdr = (struct ice_l4_hdr *)&pkt[offset];
			hdr->dst_port = cpu_to_be16(open_port);

			return 0;
		}
	}

	return -EIO;
}

/**
 * ice_find_adv_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @lkups: lookup elements or
 *	   match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @recp_id: recipe ID for which we are finding the rule
 * @rinfo: other information regarding the rule e.g. priority and action info
 *
 * Helper function to search for a given advance rule entry
 * Returns pointer to entry storing the rule if found
 */
static struct ice_adv_fltr_mgmt_list_entry *
ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
			u16 lkups_cnt, u16 recp_id,
			struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_switch_info *sw = hw->switch_info;
	int i;

	list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
			    list_entry) {
		bool lkups_matched = true;

		if (lkups_cnt != list_itr->lkups_cnt)
			continue;
		/* compare lookups element-wise and in order; a single
		 * mismatching element disqualifies the candidate
		 */
		for (i = 0; i < list_itr->lkups_cnt; i++)
			if (memcmp(&list_itr->lkups[i], &lkups[i],
				   sizeof(*lkups))) {
				lkups_matched = false;
				break;
			}
		/* flag and tunnel type must also match exactly */
		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
		    rinfo->tun_type == list_itr->rule_info.tun_type &&
		    lkups_matched)
			return list_itr;
	}
	return NULL;
}

/**
 * ice_adv_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current adv filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the booking keeping is described below :
 * When a VSI needs to subscribe to a given advanced filter
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static int
ice_adv_add_update_vsi_list(struct ice_hw *hw,
			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
			    struct ice_adv_rule_info *cur_fltr,
			    struct ice_adv_rule_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status;

	/* queue/queue-group forwards and drops cannot be shared via a VSI
	 * list, so refuse to merge with such an existing rule
	 */
	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
		return -EOPNOTSUPP;

	/* likewise, a new queue-directed action cannot be merged into an
	 * existing VSI-directed rule
	 */
	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
		    new_fltr->sw_act.fwd_id.hw_vsi_id)
			return -EEXIST;

		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  ICE_SW_LKUP_LAST);
		if (status)
			return status;

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;

		/* Update the previous switch rule of "forward to VSI" to
		 * "fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* NOTE(review): ice_create_vsi_list_map() can presumably
		 * fail and return NULL; that result is stored unchecked here
		 * while the function still reports success — confirm whether
		 * a NULL check (and unwinding) is needed.
		 */
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
	} else {
		u16 vsi_handle = new_fltr->sw_act.vsi_handle;

		if (!m_entry->vsi_list_info)
			return -EIO;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false,
						  ice_aqc_opc_update_sw_rules,
						  ICE_SW_LKUP_LAST);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}

/**
 * ice_add_adv_rule - helper function to create an advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 * together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: other information related to the rule that needs to be programmed
 * @added_entry: this will return recipe_id, rule_id and vsi_handle. Should be
 * ignored in case of error.
 *
 * This function can program only 1 rule at a time. The lkups is used to
 * describe all the words that form the "lookup" portion of the recipe.
 * These words can span multiple protocols. Callers to this function need to
 * pass in a list of protocol headers with lookup information along and mask
 * that determines which words are valid from the given protocol header.
 * rinfo describes other information related to this rule such as forwarding
 * IDs, priority of this rule, etc.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
	const struct ice_dummy_pkt_offsets *pkt_offsets;
	struct ice_aqc_sw_rules_elem *s_rule = NULL;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	const u8 *pkt = NULL;
	u16 word_cnt;
	u32 act = 0;
	int status;
	u8 q_rgn;

	/* Initialize profile to result index bitmap */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);
	}

	if (!lkups_cnt)
		return -EINVAL;

	/* get # of words we need to match: count every non-zero 16-bit word
	 * across all lookup masks; a recipe can hold only a bounded number
	 * of match words (ICE_MAX_CHAIN_WORDS)
	 */
	word_cnt = 0;
	for (i = 0; i < lkups_cnt; i++) {
		u16 j, *ptr;

		ptr = (u16 *)&lkups[i].m_u;
		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
			if (ptr[j] != 0)
				word_cnt++;
	}

	if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
		return -EINVAL;

	/* make sure that we can locate a dummy packet */
	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
			      &pkt_offsets);
	if (!pkt) {
		/* s_rule/adv_fltr are still NULL here, so the error label
		 * only performs no-op frees before returning
		 */
		status = -EINVAL;
		goto err_ice_add_adv_rule;
	}

	/* only forward-to-VSI/queue/queue-group and drop actions are
	 * supported by this path
	 */
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
		return -EIO;

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* translate the driver VSI handle to the HW VSI number the switch
	 * actually programs
	 */
	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
	if (rinfo->sw_act.flag & ICE_FLTR_TX)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	if (status)
		return status;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	if (m_entry) {
		/* we have to add VSI to VSI_LIST and increment vsi_count.
		 * Also Update VSI list so that we can change forwarding rule
		 * if the rule already exists, we will check if it exists with
		 * same vsi_id, if not then add it to the VSI list if it already
		 * exists if not then create a VSI list and add the existing VSI
		 * ID and the new VSI ID to the list
		 * We will add that VSI to the list
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
						     rinfo);
		if (added_entry) {
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
		}
		return status;
	}
	/* rule buffer carries the fixed lookup header plus the dummy packet */
	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;
	if (!rinfo->flags_info.act_valid) {
		act |= ICE_SINGLE_ACT_LAN_ENABLE;
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	} else {
		/* caller supplied explicit LAN/LB enable bits; honor them */
		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
						ICE_SINGLE_ACT_LB_ENABLE);
	}

	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is encoded as log2 of the group size */
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
	default:
		status = -EIO;
		goto err_ice_add_adv_rule;
	}

	/* set the rule LOOKUP type based on caller specified 'Rx'
	 * instead of hardcoding it to be either LOOKUP_TX/RX
	 *
	 * for 'Rx' set the source to be the port number
	 * for 'Tx' set the source to be the source HW VSI number (determined
	 * by caller)
	 */
	if (rinfo->rx) {
		s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->pdata.lkup_tx_rx.src =
			cpu_to_le16(hw->port_info->lport);
	} else {
		s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
	}

	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	/* write the dummy packet (with caller's match values patched in)
	 * into the rule's header area
	 */
	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
					   pkt_len, pkt_offsets);
	if (status)
		goto err_ice_add_adv_rule;

	if (rinfo->tun_type != ICE_NON_TUN &&
	    rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
						 s_rule->pdata.lkup_tx_rx.hdr,
						 pkt_offsets);
		if (status)
			goto err_ice_add_adv_rule;
	}

	/* program the rule in HW via the admin queue */
	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
				 NULL);
	if (status)
		goto err_ice_add_adv_rule;
	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
				sizeof(struct ice_adv_fltr_mgmt_list_entry),
				GFP_KERNEL);
	if (!adv_fltr) {
		status = -ENOMEM;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
	if (!adv_fltr->lkups) {
		status = -ENOMEM;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	/* remember the HW-assigned rule index for later removal/update */
	adv_fltr->rule_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	list_add(&adv_fltr->list_entry, rule_head);
	if (added_entry) {
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	}
err_ice_add_adv_rule:
	/* on failure release the partially-built bookkeeping entry; on
	 * success adv_fltr stays on the list and only s_rule is freed
	 */
	if (status && adv_fltr) {
		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
	}

	kfree(s_rule);

	return status;
}

/**
 * ice_replay_vsi_fltr - Replay filters for requested VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 *
 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
 * It is required to pass valid VSI handle.
6258 */ 6259 static int 6260 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, 6261 struct list_head *list_head) 6262 { 6263 struct ice_fltr_mgmt_list_entry *itr; 6264 int status = 0; 6265 u16 hw_vsi_id; 6266 6267 if (list_empty(list_head)) 6268 return status; 6269 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 6270 6271 list_for_each_entry(itr, list_head, list_entry) { 6272 struct ice_fltr_list_entry f_entry; 6273 6274 f_entry.fltr_info = itr->fltr_info; 6275 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN && 6276 itr->fltr_info.vsi_handle == vsi_handle) { 6277 /* update the src in case it is VSI num */ 6278 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) 6279 f_entry.fltr_info.src = hw_vsi_id; 6280 status = ice_add_rule_internal(hw, recp_id, &f_entry); 6281 if (status) 6282 goto end; 6283 continue; 6284 } 6285 if (!itr->vsi_list_info || 6286 !test_bit(vsi_handle, itr->vsi_list_info->vsi_map)) 6287 continue; 6288 /* Clearing it so that the logic can add it back */ 6289 clear_bit(vsi_handle, itr->vsi_list_info->vsi_map); 6290 f_entry.fltr_info.vsi_handle = vsi_handle; 6291 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; 6292 /* update the src in case it is VSI num */ 6293 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) 6294 f_entry.fltr_info.src = hw_vsi_id; 6295 if (recp_id == ICE_SW_LKUP_VLAN) 6296 status = ice_add_vlan_internal(hw, &f_entry); 6297 else 6298 status = ice_add_rule_internal(hw, recp_id, &f_entry); 6299 if (status) 6300 goto end; 6301 } 6302 end: 6303 return status; 6304 } 6305 6306 /** 6307 * ice_adv_rem_update_vsi_list 6308 * @hw: pointer to the hardware structure 6309 * @vsi_handle: VSI handle of the VSI to remove 6310 * @fm_list: filter management entry for which the VSI list management needs to 6311 * be done 6312 */ 6313 static int 6314 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, 6315 struct ice_adv_fltr_mgmt_list_entry *fm_list) 6316 { 6317 struct ice_vsi_list_map_info *vsi_list_info; 6318 enum 
ice_sw_lkup_type lkup_type; 6319 u16 vsi_list_id; 6320 int status; 6321 6322 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST || 6323 fm_list->vsi_count == 0) 6324 return -EINVAL; 6325 6326 /* A rule with the VSI being removed does not exist */ 6327 if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) 6328 return -ENOENT; 6329 6330 lkup_type = ICE_SW_LKUP_LAST; 6331 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id; 6332 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true, 6333 ice_aqc_opc_update_sw_rules, 6334 lkup_type); 6335 if (status) 6336 return status; 6337 6338 fm_list->vsi_count--; 6339 clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map); 6340 vsi_list_info = fm_list->vsi_list_info; 6341 if (fm_list->vsi_count == 1) { 6342 struct ice_fltr_info tmp_fltr; 6343 u16 rem_vsi_handle; 6344 6345 rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, 6346 ICE_MAX_VSI); 6347 if (!ice_is_vsi_valid(hw, rem_vsi_handle)) 6348 return -EIO; 6349 6350 /* Make sure VSI list is empty before removing it below */ 6351 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, 6352 vsi_list_id, true, 6353 ice_aqc_opc_update_sw_rules, 6354 lkup_type); 6355 if (status) 6356 return status; 6357 6358 memset(&tmp_fltr, 0, sizeof(tmp_fltr)); 6359 tmp_fltr.flag = fm_list->rule_info.sw_act.flag; 6360 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id; 6361 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI; 6362 tmp_fltr.fltr_act = ICE_FWD_TO_VSI; 6363 tmp_fltr.fwd_id.hw_vsi_id = 6364 ice_get_hw_vsi_num(hw, rem_vsi_handle); 6365 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id = 6366 ice_get_hw_vsi_num(hw, rem_vsi_handle); 6367 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle; 6368 6369 /* Update the previous switch rule of "MAC forward to VSI" to 6370 * "MAC fwd to VSI list" 6371 */ 6372 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); 6373 if (status) { 6374 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI 
on HW VSI %d, error %d\n", 6375 tmp_fltr.fwd_id.hw_vsi_id, status); 6376 return status; 6377 } 6378 fm_list->vsi_list_info->ref_cnt--; 6379 6380 /* Remove the VSI list since it is no longer used */ 6381 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); 6382 if (status) { 6383 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n", 6384 vsi_list_id, status); 6385 return status; 6386 } 6387 6388 list_del(&vsi_list_info->list_entry); 6389 devm_kfree(ice_hw_to_dev(hw), vsi_list_info); 6390 fm_list->vsi_list_info = NULL; 6391 } 6392 6393 return status; 6394 } 6395 6396 /** 6397 * ice_rem_adv_rule - removes existing advanced switch rule 6398 * @hw: pointer to the hardware structure 6399 * @lkups: information on the words that needs to be looked up. All words 6400 * together makes one recipe 6401 * @lkups_cnt: num of entries in the lkups array 6402 * @rinfo: Its the pointer to the rule information for the rule 6403 * 6404 * This function can be used to remove 1 rule at a time. The lkups is 6405 * used to describe all the words that forms the "lookup" portion of the 6406 * rule. These words can span multiple protocols. Callers to this function 6407 * need to pass in a list of protocol headers with lookup information along 6408 * and mask that determines which words are valid from the given protocol 6409 * header. rinfo describes other information related to this rule such as 6410 * forwarding IDs, priority of this rule, etc. 
 */
static int
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	bool remove_rule = false;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 i, rid, vsi_handle;
	int status = 0;

	/* extract the valid match words so the owning recipe can be found */
	memset(&lkup_exts, 0, sizeof(lkup_exts));
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
			return -EIO;

		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
		if (!count)
			return -EIO;
	}

	/* Create any special protocol/offset pairs, such as looking at tunnel
	 * bits by extracting metadata
	 */
	status = ice_add_special_words(rinfo, &lkup_exts);
	if (status)
		return status;

	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return -EINVAL;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	/* NOTE(review): the entry lookup happens before rule_lock is taken;
	 * a concurrent remove could race with this lookup — confirm callers
	 * serialize rule removal externally
	 */
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	if (!list_elem)
		return 0;
	mutex_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* plain rule: no VSI list bookkeeping, just remove it */
		remove_rule = true;
	} else if (list_elem->vsi_count > 1) {
		/* other VSIs still reference the list; only detach this VSI */
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	} else {
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status) {
			mutex_unlock(rule_lock);
			return status;
		}
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}
	mutex_unlock(rule_lock);
	if (remove_rule) {
		struct ice_aqc_sw_rules_elem *s_rule;
		u16 rule_buf_sz;

		/* removal needs only the fixed header, no dummy packet */
		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
		if (!s_rule)
			return -ENOMEM;
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(list_elem->rule_info.fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 rule_buf_sz, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		/* -ENOENT from HW means the rule was already gone; treat it
		 * as success and drop the bookkeeping entry either way
		 */
		if (!status || status == -ENOENT) {
			struct ice_switch_info *sw = hw->switch_info;

			mutex_lock(rule_lock);
			list_del(&list_elem->list_entry);
			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
			devm_kfree(ice_hw_to_dev(hw), list_elem);
			mutex_unlock(rule_lock);
			if (list_empty(&sw->recp_list[rid].filt_rules))
				sw->recp_list[rid].adv_rule = false;
		}
		kfree(s_rule);
	}
	return status;
}

/**
 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
 * @hw: pointer to the hardware structure
 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
 *
 * This function is used to remove 1 rule at a time. The removal is based on
 * the remove_entry parameter.
This function will remove rule for a given 6508 * vsi_handle with a given rule_id which is passed as parameter in remove_entry 6509 */ 6510 int 6511 ice_rem_adv_rule_by_id(struct ice_hw *hw, 6512 struct ice_rule_query_data *remove_entry) 6513 { 6514 struct ice_adv_fltr_mgmt_list_entry *list_itr; 6515 struct list_head *list_head; 6516 struct ice_adv_rule_info rinfo; 6517 struct ice_switch_info *sw; 6518 6519 sw = hw->switch_info; 6520 if (!sw->recp_list[remove_entry->rid].recp_created) 6521 return -EINVAL; 6522 list_head = &sw->recp_list[remove_entry->rid].filt_rules; 6523 list_for_each_entry(list_itr, list_head, list_entry) { 6524 if (list_itr->rule_info.fltr_rule_id == 6525 remove_entry->rule_id) { 6526 rinfo = list_itr->rule_info; 6527 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle; 6528 return ice_rem_adv_rule(hw, list_itr->lkups, 6529 list_itr->lkups_cnt, &rinfo); 6530 } 6531 } 6532 /* either list is empty or unable to find rule */ 6533 return -ENOENT; 6534 } 6535 6536 /** 6537 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a 6538 * given VSI handle 6539 * @hw: pointer to the hardware structure 6540 * @vsi_handle: VSI handle for which we are supposed to remove all the rules. 6541 * 6542 * This function is used to remove all the rules for a given VSI and as soon 6543 * as removing a rule fails, it will return immediately with the error code, 6544 * else it will return success. 
6545 */ 6546 int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle) 6547 { 6548 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry; 6549 struct ice_vsi_list_map_info *map_info; 6550 struct ice_adv_rule_info rinfo; 6551 struct list_head *list_head; 6552 struct ice_switch_info *sw; 6553 int status; 6554 u8 rid; 6555 6556 sw = hw->switch_info; 6557 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) { 6558 if (!sw->recp_list[rid].recp_created) 6559 continue; 6560 if (!sw->recp_list[rid].adv_rule) 6561 continue; 6562 6563 list_head = &sw->recp_list[rid].filt_rules; 6564 list_for_each_entry_safe(list_itr, tmp_entry, list_head, 6565 list_entry) { 6566 rinfo = list_itr->rule_info; 6567 6568 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) { 6569 map_info = list_itr->vsi_list_info; 6570 if (!map_info) 6571 continue; 6572 6573 if (!test_bit(vsi_handle, map_info->vsi_map)) 6574 continue; 6575 } else if (rinfo.sw_act.vsi_handle != vsi_handle) { 6576 continue; 6577 } 6578 6579 rinfo.sw_act.vsi_handle = vsi_handle; 6580 status = ice_rem_adv_rule(hw, list_itr->lkups, 6581 list_itr->lkups_cnt, &rinfo); 6582 if (status) 6583 return status; 6584 } 6585 } 6586 return 0; 6587 } 6588 6589 /** 6590 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI 6591 * @hw: pointer to the hardware structure 6592 * @vsi_handle: driver VSI handle 6593 * @list_head: list for which filters need to be replayed 6594 * 6595 * Replay the advanced rule for the given VSI. 
6596 */ 6597 static int 6598 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle, 6599 struct list_head *list_head) 6600 { 6601 struct ice_rule_query_data added_entry = { 0 }; 6602 struct ice_adv_fltr_mgmt_list_entry *adv_fltr; 6603 int status = 0; 6604 6605 if (list_empty(list_head)) 6606 return status; 6607 list_for_each_entry(adv_fltr, list_head, list_entry) { 6608 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info; 6609 u16 lk_cnt = adv_fltr->lkups_cnt; 6610 6611 if (vsi_handle != rinfo->sw_act.vsi_handle) 6612 continue; 6613 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo, 6614 &added_entry); 6615 if (status) 6616 break; 6617 } 6618 return status; 6619 } 6620 6621 /** 6622 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists 6623 * @hw: pointer to the hardware structure 6624 * @vsi_handle: driver VSI handle 6625 * 6626 * Replays filters for requested VSI via vsi_handle. 6627 */ 6628 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) 6629 { 6630 struct ice_switch_info *sw = hw->switch_info; 6631 int status; 6632 u8 i; 6633 6634 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { 6635 struct list_head *head; 6636 6637 head = &sw->recp_list[i].filt_replay_rules; 6638 if (!sw->recp_list[i].adv_rule) 6639 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head); 6640 else 6641 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head); 6642 if (status) 6643 return status; 6644 } 6645 return status; 6646 } 6647 6648 /** 6649 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules 6650 * @hw: pointer to the HW struct 6651 * 6652 * Deletes the filter replay rules. 
6653 */ 6654 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw) 6655 { 6656 struct ice_switch_info *sw = hw->switch_info; 6657 u8 i; 6658 6659 if (!sw) 6660 return; 6661 6662 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { 6663 if (!list_empty(&sw->recp_list[i].filt_replay_rules)) { 6664 struct list_head *l_head; 6665 6666 l_head = &sw->recp_list[i].filt_replay_rules; 6667 if (!sw->recp_list[i].adv_rule) 6668 ice_rem_sw_rule_info(hw, l_head); 6669 else 6670 ice_rem_adv_rule_info(hw, l_head); 6671 } 6672 } 6673 } 6674