// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_lib.h"
#include "ice_switch.h"

/* Byte offsets of fields inside the dummy ethernet header below */
#define ICE_ETH_DA_OFFSET		0
#define ICE_ETH_ETHTYPE_OFFSET		12
#define ICE_ETH_VLAN_TCI_OFFSET		14
#define ICE_MAX_VLAN_ID			0xFFF
#define ICE_IPV6_ETHER_ID		0x86DD

/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 * In case of VLAN filter first two bytes defines ether type (0x8100)
 * and remaining two bytes are placeholder for programming a given VLAN ID
 * In case of Ether type filter it is treated as header without VLAN tag
 * and byte 12 and 13 is used to program a given Ether type instead
 */
#define DUMMY_ETH_HDR_LEN		16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

/* Match-criteria flags used to select a dummy packet profile from
 * ice_dummy_pkt_profiles[] (see ICE_PKT_PROFILE below).
 */
enum {
	ICE_PKT_OUTER_IPV6	= BIT(0),
	ICE_PKT_TUN_GTPC	= BIT(1),
	ICE_PKT_TUN_GTPU	= BIT(2),
	ICE_PKT_TUN_NVGRE	= BIT(3),
	ICE_PKT_TUN_UDP		= BIT(4),
	ICE_PKT_INNER_IPV6	= BIT(5),
	ICE_PKT_INNER_TCP	= BIT(6),
	ICE_PKT_INNER_UDP	= BIT(7),
	ICE_PKT_GTP_NOPAY	= BIT(8),
	ICE_PKT_KMALLOC		= BIT(9),
	ICE_PKT_PPPOE		= BIT(10),
};

struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};

/* Ties a dummy packet byte template to its protocol offset table and the
 * ICE_PKT_* match flags it serves.
 */
struct ice_dummy_pkt_profile {
	const struct ice_dummy_pkt_offsets *offsets;
	const u8 *pkt;
	u32 match;
	u16 pkt_len;
	u16 offsets_len;
};

#define ICE_DECLARE_PKT_OFFSETS(type)				\
	static const struct ice_dummy_pkt_offsets		\
	ice_dummy_##type##_packet_offsets[]

#define ICE_DECLARE_PKT_TEMPLATE(type)				\
	static const u8 ice_dummy_##type##_packet[]

#define ICE_PKT_PROFILE(type, m) {			\
	.match		= (m),				\
	.pkt		= ice_dummy_##type##_packet,	\
	.pkt_len	= sizeof(ice_dummy_##type##_packet),	\
	.offsets	= ice_dummy_##type##_packet_offsets,	\
	.offsets_len	= sizeof(ice_dummy_##type##_packet_offsets),	\
}

ICE_DECLARE_PKT_OFFSETS(vlan) = {
	{ ICE_VLAN_OFOS,	12 },
};

ICE_DECLARE_PKT_TEMPLATE(vlan) = {
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
};

ICE_DECLARE_PKT_OFFSETS(qinq) = {
	{ ICE_VLAN_EX,		12 },
	{ ICE_VLAN_IN,		16 },
};

ICE_DECLARE_PKT_TEMPLATE(qinq) = {
	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
};

ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_TCP_IL,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};

ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_UDP_ILOS,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,
};

ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_TCP_IL,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34 */
	0x00, 0x46, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};

ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_UDP_ILOS,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x4e,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34 */
	0x00, 0x3a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,
};

ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_TCP_IL,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x66,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x06, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 96 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};

ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_UDP_ILOS,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x11, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 96 */
	0x00, 0x08, 0x00, 0x00,
};

ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_TCP_IL,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x6e,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34 */
	0x00, 0x5a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x06, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 104 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};

ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_UDP_ILOS,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x62,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34 */
	0x00, 0x4e, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x11, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 104 */
	0x00, 0x08, 0x00, 0x00,
};

/* offset info for MAC + IPv4 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_TCP_IL,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(tcp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_TCP_IL,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00,	/* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* IPv6 + UDP */
ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* IPv6 + UDP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00,	/* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_TCP_IL,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x58, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};

/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_UDP_ILOS,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x4c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 82 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};

/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x6c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x14, 0x06, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};

ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x60, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};

ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x44, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};

ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x38, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};

ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_TCP_IL,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x58, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x14, 0x06, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 122 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};

ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_UDP_ILOS,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x4c, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x08, 0x11, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 122 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};

ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP_NO_PAY,	42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,
};

ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP_NO_PAY,	62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x6c, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
	0x00, 0x00, 0x00, 0x00,

	0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,
};

ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_TCP_IL,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 20 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
};

ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_UDP_ILOS,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x16,

	0x00, 0x21,		/* PPP Link Layer 20 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
};

ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_TCP_IL,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 20 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
};

ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_PPPOE,		14 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_UDP_ILOS,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x88, 0x64,		/* ICE_ETYPE_OL 12 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
	0x00, 0x2a,

	0x00, 0x57,		/* PPP Link Layer 20 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
};

/* Dummy packet selection table. Ordered most-specific first: the lookup
 * picks the first entry whose match flags are a subset of the request, so
 * entries with more flags set must precede their less-specific variants.
 */
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
				  ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
	ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
					ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(pppoe_ipv4_udp, ICE_PKT_PPPOE | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(pppoe_ipv4_tcp, ICE_PKT_PPPOE),
	ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
				      ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
	ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6 |
					  ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
	ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(tcp, 0),
};

/* Helpers to size variable-length switch rule AQ buffers */
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l)	struct_size((s), hdr_data, (l))
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s)	\
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s)	\
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
#define ICE_SW_RULE_LG_ACT_SIZE(s, n)		struct_size((s), act, (n))
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n)		struct_size((s), vsi, (n))

/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
		      ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
		      ICE_MAX_NUM_RECIPES);

/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 */
int ice_init_def_sw_recp(struct ice_hw *hw)
{
	struct ice_sw_recipe *recps;
	u8 i;

	/* Device-managed allocation: freed automatically on driver detach */
	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
			     sizeof(*recps), GFP_KERNEL);
	if (!recps)
		return -ENOMEM;

	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		INIT_LIST_HEAD(&recps[i].rg_list);
		mutex_init(&recps[i].filt_rule_lock);
	}

	hw->switch_info->recp_list = recps;

	return 0;
}

/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in buf.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *req_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
 */
static int
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_get_sw_cfg *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = cpu_to_le16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status) {
		/* FW echoes back the continuation token and element count */
		*req_desc = le16_to_cpu(cmd->element);
		*num_elems = le16_to_cpu(cmd->num_elems);
	}

	return status;
}

/**
 * ice_aq_add_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
static int
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	/* When not allocating from the FW pool, the caller supplies the
	 * VSI number explicitly and marks it valid.
	 */
	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vf_id = vsi_ctx->vf_num;

	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
	}

	return status;
}

/**
 * ice_aq_free_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
static int
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	if (keep_vsi_alloc)
		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_aq_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
static int
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_is_vsi_valid - check whether the VSI is valid or not
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * check whether the VSI is valid or not
 */
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
}

/**
 * ice_get_hw_vsi_num - return the HW VSI number
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the HW VSI number
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
{
	return hw->vsi_ctx[vsi_handle]->vsi_num;
}

/**
 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the VSI context entry for a given VSI handle
 */
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
}

/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}

/**
 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 */
static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;
	u8 i;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return;
	ice_for_each_traffic_class(i) {
		/* free both LAN and RDMA queue contexts per TC */
		if (vsi->lan_q_ctx[i]) {
			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
			vsi->lan_q_ctx[i] = NULL;
		}
		if (vsi->rdma_q_ctx[i]) {
			devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
			vsi->rdma_q_ctx[i] = NULL;
		}
	}
}

/**
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * clear the VSI context entry
 */
static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (vsi) {
		ice_clear_vsi_q_ctx(hw, vsi_handle);
		devm_kfree(ice_hw_to_dev(hw), vsi);
		hw->vsi_ctx[vsi_handle] = NULL;
	}
}

/**
 * ice_clear_all_vsi_ctx - clear all the VSI context entries
 * @hw: pointer to the HW struct
 */
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
{
	u16 i;

	for (i = 0; i < ICE_MAX_VSI; i++)
		ice_clear_vsi_ctx(hw, i);
}

/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs then update
 * with the new HW VSI number in the corresponding VSI handle list entry.
 */
int
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
{
	struct ice_vsi_ctx *tmp_vsi_ctx;
	int status;

	if (vsi_handle >= ICE_MAX_VSI)
		return -EINVAL;
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	if (status)
		return status;
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!tmp_vsi_ctx) {
		/* Create a new VSI context */
		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
		if (!tmp_vsi_ctx) {
			/* roll back the HW-side add on allocation failure */
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
			return -ENOMEM;
		}
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
	} else {
		/* update with new HW VSI num */
		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
	}

	return 0;
}

/**
 * ice_free_vsi- free VSI context from hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware as well as from VSI handle list
 */
int
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
	if (!status)
		ice_clear_vsi_ctx(hw, vsi_handle);
	return status;
}

/**
 * ice_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware
 */
int
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	return ice_aq_update_vsi(hw, vsi_ctx, cd);
}

/**
 * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
 * @hw: pointer to HW struct
 * @vsi_handle: VSI SW index
 * @enable: boolean for enable/disable
 */
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
{
	struct ice_vsi_ctx *ctx;

	ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!ctx)
		return -EIO;

	if (enable)
		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	else
		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;

	return ice_update_vsi(hw, vsi_handle, ctx, NULL);
}

/**
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the HW struct
 * @vsi_list_id: VSI list ID returned or used for lookup
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 *
 * allocates or free a VSI list resource
 */
static int
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
			   enum ice_sw_lkup_type lkup_type,
			   enum ice_adminq_opc opc)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *vsi_ele;
	u16 buf_len;
	int status;

	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
	if (!sw_buf)
		return -ENOMEM;
	sw_buf->num_elems = cpu_to_le16(1);

	/* Pick the resource type: VLAN lookups use the pruning list,
	 * all other supported lookup types use the replication list.
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
	    lkup_type == ICE_SW_LKUP_DFLT) {
		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
		sw_buf->res_type =
			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
	} else {
		status = -EINVAL;
		goto ice_aq_alloc_free_vsi_list_exit;
	}

	/* On free, tell FW which list ID is being released */
	if (opc == ice_aqc_opc_free_res)
		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
	if (status)
		goto ice_aq_alloc_free_vsi_list_exit;

	/* On alloc, FW returns the newly assigned list ID */
	if (opc == ice_aqc_opc_alloc_res) {
		vsi_ele = &sw_buf->elem[0];
		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
	}

ice_aq_alloc_free_vsi_list_exit:
	devm_kfree(ice_hw_to_dev(hw), sw_buf);
	return status;
}

/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the HW struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 */
int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	int status;

	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		cpu_to_le16(num_rules);
	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
	/* Map FW "entry not found" on update/remove to -ENOENT so callers
	 * can distinguish a missing rule from other AQ failures.
	 */
	if (opc != ice_aqc_opc_add_sw_rules &&
	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
		status = -ENOENT;

	return status;
}

/**
 * ice_aq_add_recipe - add switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: number of switch recipes in the list
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x0290)
 */
static int
ice_aq_add_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 num_recipes, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	u16 buf_size;

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);

	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	buf_size = num_recipes * sizeof(*s_recipe_list);

	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
}

/**
 * ice_aq_get_recipe - get switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: pointer to the number of recipes (input and output)
 * @recipe_root: root recipe number of recipe(s) to retrieve
 * @cd: pointer to command details structure or NULL
 *
 * Get(0x0292)
 *
 * On input, *num_recipes should equal the number of entries in s_recipe_list.
 * On output, *num_recipes will equal the number of entries returned in
 * s_recipe_list.
 *
 * The caller must supply enough space in s_recipe_list to hold all possible
 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
 */
static int
ice_aq_get_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	u16 buf_size;
	int status;

	if (*num_recipes != ICE_MAX_NUM_RECIPES)
		return -EINVAL;

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);

	cmd->return_index = cpu_to_le16(recipe_root);
	cmd->num_sub_recipes = 0;

	buf_size = *num_recipes * sizeof(*s_recipe_list);

	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);

	return status;
}

/**
 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
 * @hw: pointer to the HW struct
 * @params: parameters used to update the default recipe
 *
 * This function only supports updating default recipes and it only supports
 * updating a single recipe based on the lkup_idx at a time.
 *
 * This is done as a read-modify-write operation. First, get the current recipe
 * contents based on the recipe's ID. Then modify the field vector index and
 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
 * the pre-existing recipe with the modifications.
 */
int
ice_update_recipe_lkup_idx(struct ice_hw *hw,
			   struct ice_update_recipe_lkup_idx_params *params)
{
	struct ice_aqc_recipe_data_elem *rcp_list;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	int status;

	rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
	if (!rcp_list)
		return -ENOMEM;

	/* read current recipe list from firmware */
	rcp_list->recipe_indx = params->rid;
	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
	if (status) {
		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
			  params->rid, status);
		goto error_out;
	}

	/* only modify existing recipe's lkup_idx and mask if valid, while
	 * leaving all other fields the same, then update the recipe firmware
	 */
	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
	if (params->mask_valid)
		rcp_list->content.mask[params->lkup_idx] =
			cpu_to_le16(params->mask);

	if (params->ignore_valid)
		rcp_list->content.lkup_indx[params->lkup_idx] |=
			ICE_AQ_RECIPE_LKUP_IGNORE;

	/* write back only the first (root) element that was modified */
	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
			  params->rid, params->lkup_idx, params->fv_idx,
			  params->mask, params->mask_valid ? "true" : "false",
			  status);

error_out:
	kfree(rcp_list);
	return status;
}

/**
 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 * Recipe to profile association (0x0291)
 */
static int
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
{
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);
	/* Set the recipe ID bit in the bitmask to let the device know which
	 * profile we are associating the recipe to
	 */
	memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_recipe_to_profile - Get recipe(s) mapped to a packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID whose recipe association to look up
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 * Get recipe(s) associated with the given profile ID (0x0293)
 */
static int
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
{
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status)
		memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));

	return status;
}

/**
 * ice_alloc_recipe - add recipe resource
 * @hw: pointer to the hardware structure
 * @rid: recipe ID returned as response to AQ call
 */
static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	u16 buf_len;
	int status;

	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = kzalloc(buf_len, GFP_KERNEL);
	if (!sw_buf)
		return -ENOMEM;

	/* allocate a shared recipe resource so all PFs can use it */
	sw_buf->num_elems = cpu_to_le16(1);
	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
					ICE_AQC_RES_TYPE_S) |
				       ICE_AQC_RES_TYPE_FLAG_SHARED);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (!status)
		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
	kfree(sw_buf);

	return status;
}

/**
 * ice_get_recp_to_prof_map - updates recipe to profile mapping
 * @hw: pointer to hardware structure
 *
 * This function is used to populate recipe_to_profile matrix where index to
 * this array is the recipe ID and the element is the mapping of which profiles
 * is this recipe mapped to.
 */
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
{
	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
	u16 i;

	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
		u16 j;

		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
			continue;
		bitmap_copy(profile_to_recipe[i], r_bitmap,
			    ICE_MAX_NUM_RECIPES);
		/* also populate the inverse (recipe -> profile) mapping */
		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
			set_bit(i, recipe_to_profile[j]);
	}
}

/**
 * ice_collect_result_idx - copy result index values
 * @buf: buffer that contains the result index
 * @recp: the recipe struct to copy data into
 */
static void
ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
		       struct ice_sw_recipe *recp)
{
	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
			recp->res_idxs);
}

/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 */
static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
{
	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	u8 fv_word_idx = 0;
	u16 sub_recps;
	int status;

	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */
	if (status)
		goto err_unroll;

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;
	}

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;
		bool is_root;
		u16 off = 0;

		/* rg_entry ownership transfers to recps[rid].rg_list below;
		 * freed with the device (devm), not in this function
		 */
		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
					GFP_KERNEL);
		if (!rg_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
				result_bm);

		/* get the first profile that is associated with rid */
		prof = find_first_bit(recipe_to_profile[idx],
				      ICE_MAX_NUM_PROFILES);
		/* lkup_indx[0] is skipped; FW reserves the first lookup word */
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				le16_to_cpu(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)
				continue;

			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];
			fv_word_idx++;
		}
		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		list_add(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
		} else {
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
		}

		if (!is_root)
			continue;

		/* Only do the following for root recipes entries */
		memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
		       sizeof(recps[idx].r_bitmap));
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
	}

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
					   GFP_KERNEL);
	if (!recps[rid].root_buf) {
		status = -ENOMEM;
		goto err_unroll;
	}

	/* Copy result indexes */
	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;

err_unroll:
	kfree(tmp);
	return status;
}

/* ice_init_port_info - Initialize port_info with switch configuration data
 * @pi: pointer to port_info
 * @vsi_port_num: VSI number or port number
 * @type: Type of switch element (port or VSI)
 * @swid: switch ID of the switch the element is attached to
 * @pf_vf_num: PF or VF number
 * @is_vf: true if the element is a VF, false otherwise
 */
static void
ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
		   u16 swid, u16 pf_vf_num, bool is_vf)
{
	switch (type) {
	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
		pi->sw_id = swid;
		pi->pf_vf_num = pf_vf_num;
		pi->is_vf = is_vf;
		break;
	default:
		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
		break;
	}
}

/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 */
int ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
	u16 req_desc = 0;
	u16 num_elems;
	int status;
	u16 i;

	rbuf = kzalloc(ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		struct ice_aqc_get_sw_cfg_resp_elem *ele;

		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 res_type;

			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = le16_to_cpu(ele->swid);

			if (le16_to_cpu(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);

			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
				/* FW VSI is not needed. Just continue. */
				continue;
			}

			ice_init_port_info(hw->port_info, vsi_port_num,
					   res_type, swid, pf_vf_num, is_vf);
		}
	} while (req_desc && !status);

	kfree(rbuf);
	return status;
}

/**
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @fi: filter info structure to fill/update
 *
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
2337 */ 2338 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi) 2339 { 2340 fi->lb_en = false; 2341 fi->lan_en = false; 2342 if ((fi->flag & ICE_FLTR_TX) && 2343 (fi->fltr_act == ICE_FWD_TO_VSI || 2344 fi->fltr_act == ICE_FWD_TO_VSI_LIST || 2345 fi->fltr_act == ICE_FWD_TO_Q || 2346 fi->fltr_act == ICE_FWD_TO_QGRP)) { 2347 /* Setting LB for prune actions will result in replicated 2348 * packets to the internal switch that will be dropped. 2349 */ 2350 if (fi->lkup_type != ICE_SW_LKUP_VLAN) 2351 fi->lb_en = true; 2352 2353 /* Set lan_en to TRUE if 2354 * 1. The switch is a VEB AND 2355 * 2 2356 * 2.1 The lookup is a directional lookup like ethertype, 2357 * promiscuous, ethertype-MAC, promiscuous-VLAN 2358 * and default-port OR 2359 * 2.2 The lookup is VLAN, OR 2360 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR 2361 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC. 2362 * 2363 * OR 2364 * 2365 * The switch is a VEPA. 2366 * 2367 * In all other cases, the LAN enable has to be set to false. 
2368 */ 2369 if (hw->evb_veb) { 2370 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE || 2371 fi->lkup_type == ICE_SW_LKUP_PROMISC || 2372 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || 2373 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN || 2374 fi->lkup_type == ICE_SW_LKUP_DFLT || 2375 fi->lkup_type == ICE_SW_LKUP_VLAN || 2376 (fi->lkup_type == ICE_SW_LKUP_MAC && 2377 !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) || 2378 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN && 2379 !is_unicast_ether_addr(fi->l_data.mac.mac_addr))) 2380 fi->lan_en = true; 2381 } else { 2382 fi->lan_en = true; 2383 } 2384 } 2385 } 2386 2387 /** 2388 * ice_fill_sw_rule - Helper function to fill switch rule structure 2389 * @hw: pointer to the hardware structure 2390 * @f_info: entry containing packet forwarding information 2391 * @s_rule: switch rule structure to be filled in based on mac_entry 2392 * @opc: switch rules population command type - pass in the command opcode 2393 */ 2394 static void 2395 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, 2396 struct ice_sw_rule_lkup_rx_tx *s_rule, 2397 enum ice_adminq_opc opc) 2398 { 2399 u16 vlan_id = ICE_MAX_VLAN_ID + 1; 2400 u16 vlan_tpid = ETH_P_8021Q; 2401 void *daddr = NULL; 2402 u16 eth_hdr_sz; 2403 u8 *eth_hdr; 2404 u32 act = 0; 2405 __be16 *off; 2406 u8 q_rgn; 2407 2408 if (opc == ice_aqc_opc_remove_sw_rules) { 2409 s_rule->act = 0; 2410 s_rule->index = cpu_to_le16(f_info->fltr_rule_id); 2411 s_rule->hdr_len = 0; 2412 return; 2413 } 2414 2415 eth_hdr_sz = sizeof(dummy_eth_header); 2416 eth_hdr = s_rule->hdr_data; 2417 2418 /* initialize the ether header with a dummy header */ 2419 memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz); 2420 ice_fill_sw_info(hw, f_info); 2421 2422 switch (f_info->fltr_act) { 2423 case ICE_FWD_TO_VSI: 2424 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & 2425 ICE_SINGLE_ACT_VSI_ID_M; 2426 if (f_info->lkup_type != ICE_SW_LKUP_VLAN) 2427 act |= ICE_SINGLE_ACT_VSI_FORWARDING | 2428 
ICE_SINGLE_ACT_VALID_BIT; 2429 break; 2430 case ICE_FWD_TO_VSI_LIST: 2431 act |= ICE_SINGLE_ACT_VSI_LIST; 2432 act |= (f_info->fwd_id.vsi_list_id << 2433 ICE_SINGLE_ACT_VSI_LIST_ID_S) & 2434 ICE_SINGLE_ACT_VSI_LIST_ID_M; 2435 if (f_info->lkup_type != ICE_SW_LKUP_VLAN) 2436 act |= ICE_SINGLE_ACT_VSI_FORWARDING | 2437 ICE_SINGLE_ACT_VALID_BIT; 2438 break; 2439 case ICE_FWD_TO_Q: 2440 act |= ICE_SINGLE_ACT_TO_Q; 2441 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & 2442 ICE_SINGLE_ACT_Q_INDEX_M; 2443 break; 2444 case ICE_DROP_PACKET: 2445 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | 2446 ICE_SINGLE_ACT_VALID_BIT; 2447 break; 2448 case ICE_FWD_TO_QGRP: 2449 q_rgn = f_info->qgrp_size > 0 ? 2450 (u8)ilog2(f_info->qgrp_size) : 0; 2451 act |= ICE_SINGLE_ACT_TO_Q; 2452 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & 2453 ICE_SINGLE_ACT_Q_INDEX_M; 2454 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & 2455 ICE_SINGLE_ACT_Q_REGION_M; 2456 break; 2457 default: 2458 return; 2459 } 2460 2461 if (f_info->lb_en) 2462 act |= ICE_SINGLE_ACT_LB_ENABLE; 2463 if (f_info->lan_en) 2464 act |= ICE_SINGLE_ACT_LAN_ENABLE; 2465 2466 switch (f_info->lkup_type) { 2467 case ICE_SW_LKUP_MAC: 2468 daddr = f_info->l_data.mac.mac_addr; 2469 break; 2470 case ICE_SW_LKUP_VLAN: 2471 vlan_id = f_info->l_data.vlan.vlan_id; 2472 if (f_info->l_data.vlan.tpid_valid) 2473 vlan_tpid = f_info->l_data.vlan.tpid; 2474 if (f_info->fltr_act == ICE_FWD_TO_VSI || 2475 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) { 2476 act |= ICE_SINGLE_ACT_PRUNE; 2477 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS; 2478 } 2479 break; 2480 case ICE_SW_LKUP_ETHERTYPE_MAC: 2481 daddr = f_info->l_data.ethertype_mac.mac_addr; 2482 fallthrough; 2483 case ICE_SW_LKUP_ETHERTYPE: 2484 off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET); 2485 *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype); 2486 break; 2487 case ICE_SW_LKUP_MAC_VLAN: 2488 daddr = f_info->l_data.mac_vlan.mac_addr; 2489 
vlan_id = f_info->l_data.mac_vlan.vlan_id; 2490 break; 2491 case ICE_SW_LKUP_PROMISC_VLAN: 2492 vlan_id = f_info->l_data.mac_vlan.vlan_id; 2493 fallthrough; 2494 case ICE_SW_LKUP_PROMISC: 2495 daddr = f_info->l_data.mac_vlan.mac_addr; 2496 break; 2497 default: 2498 break; 2499 } 2500 2501 s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ? 2502 cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) : 2503 cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); 2504 2505 /* Recipe set depending on lookup type */ 2506 s_rule->recipe_id = cpu_to_le16(f_info->lkup_type); 2507 s_rule->src = cpu_to_le16(f_info->src); 2508 s_rule->act = cpu_to_le32(act); 2509 2510 if (daddr) 2511 ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr); 2512 2513 if (!(vlan_id > ICE_MAX_VLAN_ID)) { 2514 off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET); 2515 *off = cpu_to_be16(vlan_id); 2516 off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET); 2517 *off = cpu_to_be16(vlan_tpid); 2518 } 2519 2520 /* Create the switch rule with the final dummy Ethernet header */ 2521 if (opc != ice_aqc_opc_update_sw_rules) 2522 s_rule->hdr_len = cpu_to_le16(eth_hdr_sz); 2523 } 2524 2525 /** 2526 * ice_add_marker_act 2527 * @hw: pointer to the hardware structure 2528 * @m_ent: the management entry for which sw marker needs to be added 2529 * @sw_marker: sw marker to tag the Rx descriptor with 2530 * @l_id: large action resource ID 2531 * 2532 * Create a large action to hold software marker and update the switch rule 2533 * entry pointed by m_ent with newly created large action 2534 */ 2535 static int 2536 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, 2537 u16 sw_marker, u16 l_id) 2538 { 2539 struct ice_sw_rule_lkup_rx_tx *rx_tx; 2540 struct ice_sw_rule_lg_act *lg_act; 2541 /* For software marker we need 3 large actions 2542 * 1. FWD action: FWD TO VSI or VSI LIST 2543 * 2. GENERIC VALUE action to hold the profile ID 2544 * 3. 
GENERIC VALUE action to hold the software marker ID 2545 */ 2546 const u16 num_lg_acts = 3; 2547 u16 lg_act_size; 2548 u16 rules_size; 2549 int status; 2550 u32 act; 2551 u16 id; 2552 2553 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC) 2554 return -EINVAL; 2555 2556 /* Create two back-to-back switch rules and submit them to the HW using 2557 * one memory buffer: 2558 * 1. Large Action 2559 * 2. Look up Tx Rx 2560 */ 2561 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts); 2562 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx); 2563 lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL); 2564 if (!lg_act) 2565 return -ENOMEM; 2566 2567 rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size); 2568 2569 /* Fill in the first switch rule i.e. large action */ 2570 lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT); 2571 lg_act->index = cpu_to_le16(l_id); 2572 lg_act->size = cpu_to_le16(num_lg_acts); 2573 2574 /* First action VSI forwarding or VSI list forwarding depending on how 2575 * many VSIs 2576 */ 2577 id = (m_ent->vsi_count > 1) ? 
m_ent->fltr_info.fwd_id.vsi_list_id : 2578 m_ent->fltr_info.fwd_id.hw_vsi_id; 2579 2580 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT; 2581 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M; 2582 if (m_ent->vsi_count > 1) 2583 act |= ICE_LG_ACT_VSI_LIST; 2584 lg_act->act[0] = cpu_to_le32(act); 2585 2586 /* Second action descriptor type */ 2587 act = ICE_LG_ACT_GENERIC; 2588 2589 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; 2590 lg_act->act[1] = cpu_to_le32(act); 2591 2592 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX << 2593 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M; 2594 2595 /* Third action Marker value */ 2596 act |= ICE_LG_ACT_GENERIC; 2597 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & 2598 ICE_LG_ACT_GENERIC_VALUE_M; 2599 2600 lg_act->act[2] = cpu_to_le32(act); 2601 2602 /* call the fill switch rule to fill the lookup Tx Rx structure */ 2603 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx, 2604 ice_aqc_opc_update_sw_rules); 2605 2606 /* Update the action to point to the large action ID */ 2607 rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR | 2608 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) & 2609 ICE_SINGLE_ACT_PTR_VAL_M)); 2610 2611 /* Use the filter rule ID of the previously created rule with single 2612 * act. 
Once the update happens, hardware will treat this as large 2613 * action 2614 */ 2615 rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id); 2616 2617 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2, 2618 ice_aqc_opc_update_sw_rules, NULL); 2619 if (!status) { 2620 m_ent->lg_act_idx = l_id; 2621 m_ent->sw_marker_id = sw_marker; 2622 } 2623 2624 devm_kfree(ice_hw_to_dev(hw), lg_act); 2625 return status; 2626 } 2627 2628 /** 2629 * ice_create_vsi_list_map 2630 * @hw: pointer to the hardware structure 2631 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping 2632 * @num_vsi: number of VSI handles in the array 2633 * @vsi_list_id: VSI list ID generated as part of allocate resource 2634 * 2635 * Helper function to create a new entry of VSI list ID to VSI mapping 2636 * using the given VSI list ID 2637 */ 2638 static struct ice_vsi_list_map_info * 2639 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, 2640 u16 vsi_list_id) 2641 { 2642 struct ice_switch_info *sw = hw->switch_info; 2643 struct ice_vsi_list_map_info *v_map; 2644 int i; 2645 2646 v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL); 2647 if (!v_map) 2648 return NULL; 2649 2650 v_map->vsi_list_id = vsi_list_id; 2651 v_map->ref_cnt = 1; 2652 for (i = 0; i < num_vsi; i++) 2653 set_bit(vsi_handle_arr[i], v_map->vsi_map); 2654 2655 list_add(&v_map->list_entry, &sw->vsi_list_map_head); 2656 return v_map; 2657 } 2658 2659 /** 2660 * ice_update_vsi_list_rule 2661 * @hw: pointer to the hardware structure 2662 * @vsi_handle_arr: array of VSI handles to form a VSI list 2663 * @num_vsi: number of VSI handles in the array 2664 * @vsi_list_id: VSI list ID generated as part of allocate resource 2665 * @remove: Boolean value to indicate if this is a remove action 2666 * @opc: switch rules population command type - pass in the command opcode 2667 * @lkup_type: lookup type of the filter 2668 * 2669 * Call AQ command to add a new switch rule or update existing 
switch rule 2670 * using the given VSI list ID 2671 */ 2672 static int 2673 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, 2674 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc, 2675 enum ice_sw_lkup_type lkup_type) 2676 { 2677 struct ice_sw_rule_vsi_list *s_rule; 2678 u16 s_rule_size; 2679 u16 rule_type; 2680 int status; 2681 int i; 2682 2683 if (!num_vsi) 2684 return -EINVAL; 2685 2686 if (lkup_type == ICE_SW_LKUP_MAC || 2687 lkup_type == ICE_SW_LKUP_MAC_VLAN || 2688 lkup_type == ICE_SW_LKUP_ETHERTYPE || 2689 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || 2690 lkup_type == ICE_SW_LKUP_PROMISC || 2691 lkup_type == ICE_SW_LKUP_PROMISC_VLAN || 2692 lkup_type == ICE_SW_LKUP_DFLT) 2693 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR : 2694 ICE_AQC_SW_RULES_T_VSI_LIST_SET; 2695 else if (lkup_type == ICE_SW_LKUP_VLAN) 2696 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR : 2697 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET; 2698 else 2699 return -EINVAL; 2700 2701 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi); 2702 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); 2703 if (!s_rule) 2704 return -ENOMEM; 2705 for (i = 0; i < num_vsi; i++) { 2706 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) { 2707 status = -EINVAL; 2708 goto exit; 2709 } 2710 /* AQ call requires hw_vsi_id(s) */ 2711 s_rule->vsi[i] = 2712 cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i])); 2713 } 2714 2715 s_rule->hdr.type = cpu_to_le16(rule_type); 2716 s_rule->number_vsi = cpu_to_le16(num_vsi); 2717 s_rule->index = cpu_to_le16(vsi_list_id); 2718 2719 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL); 2720 2721 exit: 2722 devm_kfree(ice_hw_to_dev(hw), s_rule); 2723 return status; 2724 } 2725 2726 /** 2727 * ice_create_vsi_list_rule - Creates and populates a VSI list rule 2728 * @hw: pointer to the HW struct 2729 * @vsi_handle_arr: array of VSI handles to form a VSI list 2730 * @num_vsi: number of VSI handles in the array 
2731 * @vsi_list_id: stores the ID of the VSI list to be created 2732 * @lkup_type: switch rule filter's lookup type 2733 */ 2734 static int 2735 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, 2736 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type) 2737 { 2738 int status; 2739 2740 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type, 2741 ice_aqc_opc_alloc_res); 2742 if (status) 2743 return status; 2744 2745 /* Update the newly created VSI list to include the specified VSIs */ 2746 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi, 2747 *vsi_list_id, false, 2748 ice_aqc_opc_add_sw_rules, lkup_type); 2749 } 2750 2751 /** 2752 * ice_create_pkt_fwd_rule 2753 * @hw: pointer to the hardware structure 2754 * @f_entry: entry containing packet forwarding information 2755 * 2756 * Create switch rule with given filter information and add an entry 2757 * to the corresponding filter management list to track this switch rule 2758 * and VSI mapping 2759 */ 2760 static int 2761 ice_create_pkt_fwd_rule(struct ice_hw *hw, 2762 struct ice_fltr_list_entry *f_entry) 2763 { 2764 struct ice_fltr_mgmt_list_entry *fm_entry; 2765 struct ice_sw_rule_lkup_rx_tx *s_rule; 2766 enum ice_sw_lkup_type l_type; 2767 struct ice_sw_recipe *recp; 2768 int status; 2769 2770 s_rule = devm_kzalloc(ice_hw_to_dev(hw), 2771 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 2772 GFP_KERNEL); 2773 if (!s_rule) 2774 return -ENOMEM; 2775 fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry), 2776 GFP_KERNEL); 2777 if (!fm_entry) { 2778 status = -ENOMEM; 2779 goto ice_create_pkt_fwd_rule_exit; 2780 } 2781 2782 fm_entry->fltr_info = f_entry->fltr_info; 2783 2784 /* Initialize all the fields for the management entry */ 2785 fm_entry->vsi_count = 1; 2786 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX; 2787 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID; 2788 fm_entry->counter_index = ICE_INVAL_COUNTER_ID; 2789 2790 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule, 
2791 ice_aqc_opc_add_sw_rules); 2792 2793 status = ice_aq_sw_rules(hw, s_rule, 2794 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1, 2795 ice_aqc_opc_add_sw_rules, NULL); 2796 if (status) { 2797 devm_kfree(ice_hw_to_dev(hw), fm_entry); 2798 goto ice_create_pkt_fwd_rule_exit; 2799 } 2800 2801 f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index); 2802 fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index); 2803 2804 /* The book keeping entries will get removed when base driver 2805 * calls remove filter AQ command 2806 */ 2807 l_type = fm_entry->fltr_info.lkup_type; 2808 recp = &hw->switch_info->recp_list[l_type]; 2809 list_add(&fm_entry->list_entry, &recp->filt_rules); 2810 2811 ice_create_pkt_fwd_rule_exit: 2812 devm_kfree(ice_hw_to_dev(hw), s_rule); 2813 return status; 2814 } 2815 2816 /** 2817 * ice_update_pkt_fwd_rule 2818 * @hw: pointer to the hardware structure 2819 * @f_info: filter information for switch rule 2820 * 2821 * Call AQ command to update a previously created switch rule with a 2822 * VSI list ID 2823 */ 2824 static int 2825 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) 2826 { 2827 struct ice_sw_rule_lkup_rx_tx *s_rule; 2828 int status; 2829 2830 s_rule = devm_kzalloc(ice_hw_to_dev(hw), 2831 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 2832 GFP_KERNEL); 2833 if (!s_rule) 2834 return -ENOMEM; 2835 2836 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules); 2837 2838 s_rule->index = cpu_to_le16(f_info->fltr_rule_id); 2839 2840 /* Update switch rule with new rule set to forward VSI list */ 2841 status = ice_aq_sw_rules(hw, s_rule, 2842 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1, 2843 ice_aqc_opc_update_sw_rules, NULL); 2844 2845 devm_kfree(ice_hw_to_dev(hw), s_rule); 2846 return status; 2847 } 2848 2849 /** 2850 * ice_update_sw_rule_bridge_mode 2851 * @hw: pointer to the HW struct 2852 * 2853 * Updates unicast switch filter rules based on VEB/VEPA mode 2854 */ 2855 int ice_update_sw_rule_bridge_mode(struct 
ice_hw *hw) 2856 { 2857 struct ice_switch_info *sw = hw->switch_info; 2858 struct ice_fltr_mgmt_list_entry *fm_entry; 2859 struct list_head *rule_head; 2860 struct mutex *rule_lock; /* Lock to protect filter rule list */ 2861 int status = 0; 2862 2863 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; 2864 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; 2865 2866 mutex_lock(rule_lock); 2867 list_for_each_entry(fm_entry, rule_head, list_entry) { 2868 struct ice_fltr_info *fi = &fm_entry->fltr_info; 2869 u8 *addr = fi->l_data.mac.mac_addr; 2870 2871 /* Update unicast Tx rules to reflect the selected 2872 * VEB/VEPA mode 2873 */ 2874 if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) && 2875 (fi->fltr_act == ICE_FWD_TO_VSI || 2876 fi->fltr_act == ICE_FWD_TO_VSI_LIST || 2877 fi->fltr_act == ICE_FWD_TO_Q || 2878 fi->fltr_act == ICE_FWD_TO_QGRP)) { 2879 status = ice_update_pkt_fwd_rule(hw, fi); 2880 if (status) 2881 break; 2882 } 2883 } 2884 2885 mutex_unlock(rule_lock); 2886 2887 return status; 2888 } 2889 2890 /** 2891 * ice_add_update_vsi_list 2892 * @hw: pointer to the hardware structure 2893 * @m_entry: pointer to current filter management list entry 2894 * @cur_fltr: filter information from the book keeping entry 2895 * @new_fltr: filter information with the new VSI to be added 2896 * 2897 * Call AQ command to add or update previously created VSI list with new VSI. 2898 * 2899 * Helper function to do book keeping associated with adding filter information 2900 * The algorithm to do the book keeping is described below : 2901 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.) 
2902 * if only one VSI has been added till now 2903 * Allocate a new VSI list and add two VSIs 2904 * to this list using switch rule command 2905 * Update the previously created switch rule with the 2906 * newly created VSI list ID 2907 * if a VSI list was previously created 2908 * Add the new VSI to the previously created VSI list set 2909 * using the update switch rule command 2910 */ 2911 static int 2912 ice_add_update_vsi_list(struct ice_hw *hw, 2913 struct ice_fltr_mgmt_list_entry *m_entry, 2914 struct ice_fltr_info *cur_fltr, 2915 struct ice_fltr_info *new_fltr) 2916 { 2917 u16 vsi_list_id = 0; 2918 int status = 0; 2919 2920 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q || 2921 cur_fltr->fltr_act == ICE_FWD_TO_QGRP)) 2922 return -EOPNOTSUPP; 2923 2924 if ((new_fltr->fltr_act == ICE_FWD_TO_Q || 2925 new_fltr->fltr_act == ICE_FWD_TO_QGRP) && 2926 (cur_fltr->fltr_act == ICE_FWD_TO_VSI || 2927 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST)) 2928 return -EOPNOTSUPP; 2929 2930 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { 2931 /* Only one entry existed in the mapping and it was not already 2932 * a part of a VSI list. So, create a VSI list with the old and 2933 * new VSIs. 
2934 */ 2935 struct ice_fltr_info tmp_fltr; 2936 u16 vsi_handle_arr[2]; 2937 2938 /* A rule already exists with the new VSI being added */ 2939 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) 2940 return -EEXIST; 2941 2942 vsi_handle_arr[0] = cur_fltr->vsi_handle; 2943 vsi_handle_arr[1] = new_fltr->vsi_handle; 2944 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, 2945 &vsi_list_id, 2946 new_fltr->lkup_type); 2947 if (status) 2948 return status; 2949 2950 tmp_fltr = *new_fltr; 2951 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id; 2952 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; 2953 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; 2954 /* Update the previous switch rule of "MAC forward to VSI" to 2955 * "MAC fwd to VSI list" 2956 */ 2957 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); 2958 if (status) 2959 return status; 2960 2961 cur_fltr->fwd_id.vsi_list_id = vsi_list_id; 2962 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST; 2963 m_entry->vsi_list_info = 2964 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, 2965 vsi_list_id); 2966 2967 if (!m_entry->vsi_list_info) 2968 return -ENOMEM; 2969 2970 /* If this entry was large action then the large action needs 2971 * to be updated to point to FWD to VSI list 2972 */ 2973 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) 2974 status = 2975 ice_add_marker_act(hw, m_entry, 2976 m_entry->sw_marker_id, 2977 m_entry->lg_act_idx); 2978 } else { 2979 u16 vsi_handle = new_fltr->vsi_handle; 2980 enum ice_adminq_opc opcode; 2981 2982 if (!m_entry->vsi_list_info) 2983 return -EIO; 2984 2985 /* A rule already exists with the new VSI being added */ 2986 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) 2987 return 0; 2988 2989 /* Update the previously created VSI list set with 2990 * the new VSI ID passed in 2991 */ 2992 vsi_list_id = cur_fltr->fwd_id.vsi_list_id; 2993 opcode = ice_aqc_opc_update_sw_rules; 2994 2995 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, 2996 vsi_list_id, false, opcode, 2997 
new_fltr->lkup_type); 2998 /* update VSI list mapping info with new VSI ID */ 2999 if (!status) 3000 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map); 3001 } 3002 if (!status) 3003 m_entry->vsi_count++; 3004 return status; 3005 } 3006 3007 /** 3008 * ice_find_rule_entry - Search a rule entry 3009 * @hw: pointer to the hardware structure 3010 * @recp_id: lookup type for which the specified rule needs to be searched 3011 * @f_info: rule information 3012 * 3013 * Helper function to search for a given rule entry 3014 * Returns pointer to entry storing the rule if found 3015 */ 3016 static struct ice_fltr_mgmt_list_entry * 3017 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info) 3018 { 3019 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL; 3020 struct ice_switch_info *sw = hw->switch_info; 3021 struct list_head *list_head; 3022 3023 list_head = &sw->recp_list[recp_id].filt_rules; 3024 list_for_each_entry(list_itr, list_head, list_entry) { 3025 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, 3026 sizeof(f_info->l_data)) && 3027 f_info->flag == list_itr->fltr_info.flag) { 3028 ret = list_itr; 3029 break; 3030 } 3031 } 3032 return ret; 3033 } 3034 3035 /** 3036 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1 3037 * @hw: pointer to the hardware structure 3038 * @recp_id: lookup type for which VSI lists needs to be searched 3039 * @vsi_handle: VSI handle to be found in VSI list 3040 * @vsi_list_id: VSI list ID found containing vsi_handle 3041 * 3042 * Helper function to search a VSI list with single entry containing given VSI 3043 * handle element. This can be extended further to search VSI list with more 3044 * than 1 vsi_count. Returns pointer to VSI list entry if found. 
3045 */ 3046 static struct ice_vsi_list_map_info * 3047 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle, 3048 u16 *vsi_list_id) 3049 { 3050 struct ice_vsi_list_map_info *map_info = NULL; 3051 struct ice_switch_info *sw = hw->switch_info; 3052 struct ice_fltr_mgmt_list_entry *list_itr; 3053 struct list_head *list_head; 3054 3055 list_head = &sw->recp_list[recp_id].filt_rules; 3056 list_for_each_entry(list_itr, list_head, list_entry) { 3057 if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) { 3058 map_info = list_itr->vsi_list_info; 3059 if (test_bit(vsi_handle, map_info->vsi_map)) { 3060 *vsi_list_id = map_info->vsi_list_id; 3061 return map_info; 3062 } 3063 } 3064 } 3065 return NULL; 3066 } 3067 3068 /** 3069 * ice_add_rule_internal - add rule for a given lookup type 3070 * @hw: pointer to the hardware structure 3071 * @recp_id: lookup type (recipe ID) for which rule has to be added 3072 * @f_entry: structure containing MAC forwarding information 3073 * 3074 * Adds or updates the rule lists for a given recipe 3075 */ 3076 static int 3077 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id, 3078 struct ice_fltr_list_entry *f_entry) 3079 { 3080 struct ice_switch_info *sw = hw->switch_info; 3081 struct ice_fltr_info *new_fltr, *cur_fltr; 3082 struct ice_fltr_mgmt_list_entry *m_entry; 3083 struct mutex *rule_lock; /* Lock to protect filter rule list */ 3084 int status = 0; 3085 3086 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) 3087 return -EINVAL; 3088 f_entry->fltr_info.fwd_id.hw_vsi_id = 3089 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); 3090 3091 rule_lock = &sw->recp_list[recp_id].filt_rule_lock; 3092 3093 mutex_lock(rule_lock); 3094 new_fltr = &f_entry->fltr_info; 3095 if (new_fltr->flag & ICE_FLTR_RX) 3096 new_fltr->src = hw->port_info->lport; 3097 else if (new_fltr->flag & ICE_FLTR_TX) 3098 new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id; 3099 3100 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr); 3101 if 
(!m_entry) {
		mutex_unlock(rule_lock);
		return ice_create_pkt_fwd_rule(hw, f_entry);
	}

	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
	mutex_unlock(rule_lock);

	return status;
}

/**
 * ice_remove_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @lkup_type: switch rule filter lookup type
 *
 * The VSI list should be emptied before this function is called to remove the
 * VSI list.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, otherwise the status
 * of the firmware free-resource request.
 */
static int
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_sw_rule_vsi_list *s_rule;
	u16 s_rule_size;
	int status;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;

	s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
	s_rule->index = cpu_to_le16(vsi_list_id);

	/* Free the vsi_list resource that we allocated. It is assumed that the
	 * list is empty at this point.
	 */
	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
					    ice_aqc_opc_free_res);

	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Detaches @vsi_handle from the rule's VSI list. If only one VSI remains
 * afterwards (non-VLAN rules), the rule is converted back to a plain
 * ICE_FWD_TO_VSI rule and the now-unused VSI list is freed.
 *
 * Return: 0 on success or a negative error code.
 */
static int
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id;
	int status = 0;

	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return -EINVAL;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return -ENOENT;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return -EIO;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		/* Convert the rule to forward directly to the last VSI */
		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}

/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @f_entry: rule entry containing filter information
 *
 * Return: 0 on success, -ENOENT if no matching rule exists, or another
 * negative error code on failure.
 */
static int
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	bool remove_rule = false;
	u16 vsi_handle;
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = -ENOENT;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		/* a FWD_TO_VSI_LIST rule without list info is malformed */
		status = -ENOENT;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule from hardware */
		struct ice_sw_rule_lkup_rx_tx *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
				      GFP_KERNEL);
		if (!s_rule) {
			status = -ENOMEM;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
					 1, ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove the bookkeeping entry from the SW rule list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);

		if (status)
			goto exit;

		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_mac_fltr_exist - does this MAC filter exist for given VSI
 * @hw: pointer to the hardware structure
 * @mac: MAC address to be checked (for MAC filter)
 * @vsi_handle: check MAC filter for this VSI
 *
 * Return: true if a matching Tx MAC forwarding rule exists for the VSI.
 */
bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
{
	struct ice_fltr_mgmt_list_entry *entry;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 hw_vsi_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return false;

	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	sw = hw->switch_info;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
	if (!rule_head)
		return false;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	mutex_lock(rule_lock);
	list_for_each_entry(entry, rule_head, list_entry) {
		struct ice_fltr_info *f_info = &entry->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_zero_ether_addr(mac_addr))
			continue;

		/* only consider Tx VSI-sourced MAC forwarding rules that
		 * target this VSI
		 */
		if (f_info->flag != ICE_FLTR_TX ||
		    f_info->src_id != ICE_SRC_ID_VSI ||
		    f_info->lkup_type != ICE_SW_LKUP_MAC ||
		    f_info->fltr_act != ICE_FWD_TO_VSI ||
		    hw_vsi_id != f_info->fwd_id.hw_vsi_id)
			continue;

		if (ether_addr_equal(mac, mac_addr)) {
			mutex_unlock(rule_lock);
			return true;
		}
	}
	mutex_unlock(rule_lock);
	return false;
}

/**
 * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
 * @hw: pointer to the hardware structure
 * @vlan_id: VLAN ID
 * @vsi_handle: check VLAN filter for this VSI
 *
 * Return: true if a matching VLAN forwarding rule exists for the VSI.
 */
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
{
	struct ice_fltr_mgmt_list_entry *entry;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 hw_vsi_id;

	if (vlan_id > ICE_MAX_VLAN_ID)
		return false;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return false;

	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	sw = hw->switch_info;
	rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	if (!rule_head)
		return false;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	list_for_each_entry(entry, rule_head, list_entry) {
		struct ice_fltr_info *f_info = &entry->fltr_info;
		u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
		struct ice_vsi_list_map_info *map_info;

		if (entry_vlan_id > ICE_MAX_VLAN_ID)
			continue;

		if (f_info->flag != ICE_FLTR_TX ||
		    f_info->src_id != ICE_SRC_ID_VSI ||
		    f_info->lkup_type != ICE_SW_LKUP_VLAN)
			continue;

		/* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
		if (f_info->fltr_act != ICE_FWD_TO_VSI &&
		    f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
			continue;

		if (f_info->fltr_act == ICE_FWD_TO_VSI) {
			if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
				continue;
		} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			/* If filter_action is FWD_TO_VSI_LIST, make sure
			 * that VSI being checked is part of VSI list
			 */
			if (entry->vsi_count == 1 &&
			    entry->vsi_list_info) {
				map_info = entry->vsi_list_info;
				if (!test_bit(vsi_handle, map_info->vsi_map))
					continue;
			}
		}

		if (vlan_id == entry_vlan_id) {
			mutex_unlock(rule_lock);
			return true;
		}
	}
	mutex_unlock(rule_lock);

	return false;
}

/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * Return: 0 on success; -EINVAL on a malformed entry; otherwise the status
 * of the first entry that failed to be added. Stops at the first failure.
 */
int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_fltr_list_entry *m_list_itr;
	int status = 0;

	if (!m_list || !hw)
		return -EINVAL;

	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return -EINVAL;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return -EINVAL;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
			return -EINVAL;

		m_list_itr->status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
							   m_list_itr);
		if (m_list_itr->status)
			return m_list_itr->status;
	}

	return status;
}

/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 *
 * Return: 0 on success or a negative error code.
 */
static int
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return -EINVAL;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return -EINVAL;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status) {
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = -ENOENT;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it used
		 * by one VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* If VLAN rule exists and VSI list being used by this rule is
		 * referenced by more than 1 VLAN rule. Then create a new VSI
		 * list appending previous VSI with new VSI and update existing
		 * VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = -EIO;
			goto exit;
		}

		cur_handle =
			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
				       ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = -EEXIST;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* before overriding VSI list map info, decrement ref_cnt of
		 * previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_add_vlan - Add VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 *
 * Return: 0 on success, -EINVAL on a non-VLAN entry, otherwise the status
 * of the first entry that failed to be added.
 */
int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr;

	if (!v_list || !hw)
		return -EINVAL;

	list_for_each_entry(v_list_itr, v_list, list_entry) {
		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
			return -EINVAL;
		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_add_eth_mac - Add ethertype and MAC based filter rule
 * @hw: pointer to the hardware structure
 * @em_list: list of ether type MAC filter, MAC is optional
 *
 * This function requires the caller to populate the entries in
 * the filter list with the necessary fields (including flags to
 * indicate Tx or Rx rules).
 *
 * Return: 0 on success, -EINVAL on an unsupported lookup type, otherwise
 * the status of the first entry that failed to be added.
 */
int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
{
	struct ice_fltr_list_entry *em_list_itr;

	if (!em_list || !hw)
		return -EINVAL;

	list_for_each_entry(em_list_itr, em_list, list_entry) {
		enum ice_sw_lkup_type l_type =
			em_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
		    l_type != ICE_SW_LKUP_ETHERTYPE)
			return -EINVAL;

		em_list_itr->status = ice_add_rule_internal(hw, l_type,
							    em_list_itr);
		if (em_list_itr->status)
			return em_list_itr->status;
	}
	return 0;
}

/**
 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
 * @hw: pointer to the hardware structure
 * @em_list: list of ethertype or ethertype MAC entries
 *
 * Return: 0 on success, -EINVAL on an unsupported lookup type, otherwise
 * the status of the first entry that failed to be removed.
 */
int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
{
	struct ice_fltr_list_entry *em_list_itr, *tmp;

	if (!em_list || !hw)
		return -EINVAL;

	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
		enum ice_sw_lkup_type l_type =
			em_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
		    l_type != ICE_SW_LKUP_ETHERTYPE)
			return -EINVAL;

		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
							       em_list_itr);
		if (em_list_itr->status)
			return em_list_itr->status;
	}
	return 0;
}

/**
 * ice_rem_sw_rule_info
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 *
 * Frees every filter management entry on @rule_head.
 */
static void
ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
{
	if (!list_empty(rule_head)) {
		struct ice_fltr_mgmt_list_entry *entry;
		struct ice_fltr_mgmt_list_entry *tmp;

		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
			list_del(&entry->list_entry);
			devm_kfree(ice_hw_to_dev(hw), entry);
		}
	}
}

/**
 * ice_rem_adv_rule_info
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 *
 * Frees every advanced filter management entry on @rule_head, including the
 * separately allocated lookup array of each entry.
 */
static void
ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
{
	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
	struct ice_adv_fltr_mgmt_list_entry *lst_itr;

	if (list_empty(rule_head))
		return;

	list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
		list_del(&lst_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
		devm_kfree(ice_hw_to_dev(hw), lst_itr);
	}
}

/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @pi: pointer to the port_info structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add filter rule to set/unset given VSI as default VSI for the switch
 * (represented by swid)
 *
 * Return: 0 on success or a negative error code.
 */
int
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
		 u8 direction)
{
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info f_info;
	struct ice_hw *hw = pi->hw;
	u16 hw_vsi_id;
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&f_info, 0, sizeof(f_info));

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
	f_info.vsi_handle = vsi_handle;

	/* Rx rules are sourced from the physical port, Tx rules from the
	 * VSI itself
	 */
	if (f_info.flag & ICE_FLTR_RX) {
		f_info.src = hw->port_info->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
	} else if (f_info.flag & ICE_FLTR_TX) {
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
	}
	f_list_entry.fltr_info = f_info;

	if (set)
		status = ice_add_rule_internal(hw, ICE_SW_LKUP_DFLT,
					       &f_list_entry);
	else
		status = ice_remove_rule_internal(hw, ICE_SW_LKUP_DFLT,
						  &f_list_entry);

	return status;
}

/**
 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
 * @fm_entry: filter entry to inspect
 * @vsi_handle: VSI handle to compare with filter info
 *
 * Return: true if the filter forwards to @vsi_handle directly, or forwards
 * to a VSI list that contains @vsi_handle.
 */
static bool
ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
{
	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
		 fm_entry->vsi_list_info &&
		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
}

/**
 * ice_check_if_dflt_vsi - check if VSI is default VSI
 * @pi: pointer to the port_info structure
 * @vsi_handle: vsi handle to check for in filter list
 * @rule_exists: indicates if there are any VSI's in the rule list
 *
 * checks if the VSI is in a default VSI list, and also indicates
 * if the default VSI list is empty
 *
 * Return: true if @vsi_handle is used by a default-VSI rule.
 */
bool
ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
		      bool *rule_exists)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_sw_recipe *recp_list;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	bool ret = false;

	recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
	rule_lock = &recp_list->filt_rule_lock;
	rule_head = &recp_list->filt_rules;

	mutex_lock(rule_lock);

	if (rule_exists && !list_empty(rule_head))
		*rule_exists = true;

	list_for_each_entry(fm_entry, rule_head, list_entry) {
		if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
			ret = true;
			break;
		}
	}

	mutex_unlock(rule_lock);

	return ret;
}
3869 3870 /** 3871 * ice_remove_mac - remove a MAC address based filter rule 3872 * @hw: pointer to the hardware structure 3873 * @m_list: list of MAC addresses and forwarding information 3874 * 3875 * This function removes either a MAC filter rule or a specific VSI from a 3876 * VSI list for a multicast MAC address. 3877 * 3878 * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should 3879 * be aware that this call will only work if all the entries passed into m_list 3880 * were added previously. It will not attempt to do a partial remove of entries 3881 * that were found. 3882 */ 3883 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) 3884 { 3885 struct ice_fltr_list_entry *list_itr, *tmp; 3886 3887 if (!m_list) 3888 return -EINVAL; 3889 3890 list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { 3891 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type; 3892 u16 vsi_handle; 3893 3894 if (l_type != ICE_SW_LKUP_MAC) 3895 return -EINVAL; 3896 3897 vsi_handle = list_itr->fltr_info.vsi_handle; 3898 if (!ice_is_vsi_valid(hw, vsi_handle)) 3899 return -EINVAL; 3900 3901 list_itr->fltr_info.fwd_id.hw_vsi_id = 3902 ice_get_hw_vsi_num(hw, vsi_handle); 3903 3904 list_itr->status = ice_remove_rule_internal(hw, 3905 ICE_SW_LKUP_MAC, 3906 list_itr); 3907 if (list_itr->status) 3908 return list_itr->status; 3909 } 3910 return 0; 3911 } 3912 3913 /** 3914 * ice_remove_vlan - Remove VLAN based filter rule 3915 * @hw: pointer to the hardware structure 3916 * @v_list: list of VLAN entries and forwarding information 3917 */ 3918 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) 3919 { 3920 struct ice_fltr_list_entry *v_list_itr, *tmp; 3921 3922 if (!v_list || !hw) 3923 return -EINVAL; 3924 3925 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { 3926 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; 3927 3928 if (l_type != ICE_SW_LKUP_VLAN) 3929 return -EINVAL; 3930 v_list_itr->status = 
ice_remove_rule_internal(hw, 3931 ICE_SW_LKUP_VLAN, 3932 v_list_itr); 3933 if (v_list_itr->status) 3934 return v_list_itr->status; 3935 } 3936 return 0; 3937 } 3938 3939 /** 3940 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list 3941 * @hw: pointer to the hardware structure 3942 * @vsi_handle: VSI handle to remove filters from 3943 * @vsi_list_head: pointer to the list to add entry to 3944 * @fi: pointer to fltr_info of filter entry to copy & add 3945 * 3946 * Helper function, used when creating a list of filters to remove from 3947 * a specific VSI. The entry added to vsi_list_head is a COPY of the 3948 * original filter entry, with the exception of fltr_info.fltr_act and 3949 * fltr_info.fwd_id fields. These are set such that later logic can 3950 * extract which VSI to remove the fltr from, and pass on that information. 3951 */ 3952 static int 3953 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, 3954 struct list_head *vsi_list_head, 3955 struct ice_fltr_info *fi) 3956 { 3957 struct ice_fltr_list_entry *tmp; 3958 3959 /* this memory is freed up in the caller function 3960 * once filters for this VSI are removed 3961 */ 3962 tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL); 3963 if (!tmp) 3964 return -ENOMEM; 3965 3966 tmp->fltr_info = *fi; 3967 3968 /* Overwrite these fields to indicate which VSI to remove filter from, 3969 * so find and remove logic can extract the information from the 3970 * list entries. Note that original entries will still have proper 3971 * values. 
3972 */ 3973 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; 3974 tmp->fltr_info.vsi_handle = vsi_handle; 3975 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 3976 3977 list_add(&tmp->list_entry, vsi_list_head); 3978 3979 return 0; 3980 } 3981 3982 /** 3983 * ice_add_to_vsi_fltr_list - Add VSI filters to the list 3984 * @hw: pointer to the hardware structure 3985 * @vsi_handle: VSI handle to remove filters from 3986 * @lkup_list_head: pointer to the list that has certain lookup type filters 3987 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle 3988 * 3989 * Locates all filters in lkup_list_head that are used by the given VSI, 3990 * and adds COPIES of those entries to vsi_list_head (intended to be used 3991 * to remove the listed filters). 3992 * Note that this means all entries in vsi_list_head must be explicitly 3993 * deallocated by the caller when done with list. 3994 */ 3995 static int 3996 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, 3997 struct list_head *lkup_list_head, 3998 struct list_head *vsi_list_head) 3999 { 4000 struct ice_fltr_mgmt_list_entry *fm_entry; 4001 int status = 0; 4002 4003 /* check to make sure VSI ID is valid and within boundary */ 4004 if (!ice_is_vsi_valid(hw, vsi_handle)) 4005 return -EINVAL; 4006 4007 list_for_each_entry(fm_entry, lkup_list_head, list_entry) { 4008 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle)) 4009 continue; 4010 4011 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, 4012 vsi_list_head, 4013 &fm_entry->fltr_info); 4014 if (status) 4015 return status; 4016 } 4017 return status; 4018 } 4019 4020 /** 4021 * ice_determine_promisc_mask 4022 * @fi: filter info to parse 4023 * 4024 * Helper function to determine which ICE_PROMISC_ mask corresponds 4025 * to given filter into. 
4026 */ 4027 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi) 4028 { 4029 u16 vid = fi->l_data.mac_vlan.vlan_id; 4030 u8 *macaddr = fi->l_data.mac.mac_addr; 4031 bool is_tx_fltr = false; 4032 u8 promisc_mask = 0; 4033 4034 if (fi->flag == ICE_FLTR_TX) 4035 is_tx_fltr = true; 4036 4037 if (is_broadcast_ether_addr(macaddr)) 4038 promisc_mask |= is_tx_fltr ? 4039 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX; 4040 else if (is_multicast_ether_addr(macaddr)) 4041 promisc_mask |= is_tx_fltr ? 4042 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX; 4043 else if (is_unicast_ether_addr(macaddr)) 4044 promisc_mask |= is_tx_fltr ? 4045 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX; 4046 if (vid) 4047 promisc_mask |= is_tx_fltr ? 4048 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX; 4049 4050 return promisc_mask; 4051 } 4052 4053 /** 4054 * ice_remove_promisc - Remove promisc based filter rules 4055 * @hw: pointer to the hardware structure 4056 * @recp_id: recipe ID for which the rule needs to removed 4057 * @v_list: list of promisc entries 4058 */ 4059 static int 4060 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list) 4061 { 4062 struct ice_fltr_list_entry *v_list_itr, *tmp; 4063 4064 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { 4065 v_list_itr->status = 4066 ice_remove_rule_internal(hw, recp_id, v_list_itr); 4067 if (v_list_itr->status) 4068 return v_list_itr->status; 4069 } 4070 return 0; 4071 } 4072 4073 /** 4074 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI 4075 * @hw: pointer to the hardware structure 4076 * @vsi_handle: VSI handle to clear mode 4077 * @promisc_mask: mask of promiscuous config bits to clear 4078 * @vid: VLAN ID to clear VLAN promiscuous 4079 */ 4080 int 4081 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, 4082 u16 vid) 4083 { 4084 struct ice_switch_info *sw = hw->switch_info; 4085 struct ice_fltr_list_entry *fm_entry, *tmp; 4086 struct list_head 
remove_list_head; 4087 struct ice_fltr_mgmt_list_entry *itr; 4088 struct list_head *rule_head; 4089 struct mutex *rule_lock; /* Lock to protect filter rule list */ 4090 int status = 0; 4091 u8 recipe_id; 4092 4093 if (!ice_is_vsi_valid(hw, vsi_handle)) 4094 return -EINVAL; 4095 4096 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) 4097 recipe_id = ICE_SW_LKUP_PROMISC_VLAN; 4098 else 4099 recipe_id = ICE_SW_LKUP_PROMISC; 4100 4101 rule_head = &sw->recp_list[recipe_id].filt_rules; 4102 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock; 4103 4104 INIT_LIST_HEAD(&remove_list_head); 4105 4106 mutex_lock(rule_lock); 4107 list_for_each_entry(itr, rule_head, list_entry) { 4108 struct ice_fltr_info *fltr_info; 4109 u8 fltr_promisc_mask = 0; 4110 4111 if (!ice_vsi_uses_fltr(itr, vsi_handle)) 4112 continue; 4113 fltr_info = &itr->fltr_info; 4114 4115 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN && 4116 vid != fltr_info->l_data.mac_vlan.vlan_id) 4117 continue; 4118 4119 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info); 4120 4121 /* Skip if filter is not completely specified by given mask */ 4122 if (fltr_promisc_mask & ~promisc_mask) 4123 continue; 4124 4125 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, 4126 &remove_list_head, 4127 fltr_info); 4128 if (status) { 4129 mutex_unlock(rule_lock); 4130 goto free_fltr_list; 4131 } 4132 } 4133 mutex_unlock(rule_lock); 4134 4135 status = ice_remove_promisc(hw, recipe_id, &remove_list_head); 4136 4137 free_fltr_list: 4138 list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) { 4139 list_del(&fm_entry->list_entry); 4140 devm_kfree(ice_hw_to_dev(hw), fm_entry); 4141 } 4142 4143 return status; 4144 } 4145 4146 /** 4147 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s) 4148 * @hw: pointer to the hardware structure 4149 * @vsi_handle: VSI handle to configure 4150 * @promisc_mask: mask of promiscuous config bits 4151 * @vid: VLAN ID to set VLAN promiscuous 4152 */ 4153 int 
4154 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) 4155 { 4156 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR }; 4157 struct ice_fltr_list_entry f_list_entry; 4158 struct ice_fltr_info new_fltr; 4159 bool is_tx_fltr; 4160 int status = 0; 4161 u16 hw_vsi_id; 4162 int pkt_type; 4163 u8 recipe_id; 4164 4165 if (!ice_is_vsi_valid(hw, vsi_handle)) 4166 return -EINVAL; 4167 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 4168 4169 memset(&new_fltr, 0, sizeof(new_fltr)); 4170 4171 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) { 4172 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN; 4173 new_fltr.l_data.mac_vlan.vlan_id = vid; 4174 recipe_id = ICE_SW_LKUP_PROMISC_VLAN; 4175 } else { 4176 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC; 4177 recipe_id = ICE_SW_LKUP_PROMISC; 4178 } 4179 4180 /* Separate filters must be set for each direction/packet type 4181 * combination, so we will loop over the mask value, store the 4182 * individual type, and clear it out in the input mask as it 4183 * is found. 
4184 */ 4185 while (promisc_mask) { 4186 u8 *mac_addr; 4187 4188 pkt_type = 0; 4189 is_tx_fltr = false; 4190 4191 if (promisc_mask & ICE_PROMISC_UCAST_RX) { 4192 promisc_mask &= ~ICE_PROMISC_UCAST_RX; 4193 pkt_type = UCAST_FLTR; 4194 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) { 4195 promisc_mask &= ~ICE_PROMISC_UCAST_TX; 4196 pkt_type = UCAST_FLTR; 4197 is_tx_fltr = true; 4198 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) { 4199 promisc_mask &= ~ICE_PROMISC_MCAST_RX; 4200 pkt_type = MCAST_FLTR; 4201 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) { 4202 promisc_mask &= ~ICE_PROMISC_MCAST_TX; 4203 pkt_type = MCAST_FLTR; 4204 is_tx_fltr = true; 4205 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) { 4206 promisc_mask &= ~ICE_PROMISC_BCAST_RX; 4207 pkt_type = BCAST_FLTR; 4208 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) { 4209 promisc_mask &= ~ICE_PROMISC_BCAST_TX; 4210 pkt_type = BCAST_FLTR; 4211 is_tx_fltr = true; 4212 } 4213 4214 /* Check for VLAN promiscuous flag */ 4215 if (promisc_mask & ICE_PROMISC_VLAN_RX) { 4216 promisc_mask &= ~ICE_PROMISC_VLAN_RX; 4217 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) { 4218 promisc_mask &= ~ICE_PROMISC_VLAN_TX; 4219 is_tx_fltr = true; 4220 } 4221 4222 /* Set filter DA based on packet type */ 4223 mac_addr = new_fltr.l_data.mac.mac_addr; 4224 if (pkt_type == BCAST_FLTR) { 4225 eth_broadcast_addr(mac_addr); 4226 } else if (pkt_type == MCAST_FLTR || 4227 pkt_type == UCAST_FLTR) { 4228 /* Use the dummy ether header DA */ 4229 ether_addr_copy(mac_addr, dummy_eth_header); 4230 if (pkt_type == MCAST_FLTR) 4231 mac_addr[0] |= 0x1; /* Set multicast bit */ 4232 } 4233 4234 /* Need to reset this to zero for all iterations */ 4235 new_fltr.flag = 0; 4236 if (is_tx_fltr) { 4237 new_fltr.flag |= ICE_FLTR_TX; 4238 new_fltr.src = hw_vsi_id; 4239 } else { 4240 new_fltr.flag |= ICE_FLTR_RX; 4241 new_fltr.src = hw->port_info->lport; 4242 } 4243 4244 new_fltr.fltr_act = ICE_FWD_TO_VSI; 4245 new_fltr.vsi_handle = vsi_handle; 4246 
		/* Forward matching packets to this VSI */
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
		if (status)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}

/**
 * ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct list_head vsi_list_head;
	struct list_head *vlan_head;
	struct mutex *vlan_lock; /* Lock to protect filter rule list */
	u16 vlan_id;
	int status;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	/* Snapshot the VSI's VLAN filters into a private list under the lock,
	 * then operate on the snapshot without holding the lock.
	 */
	mutex_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	mutex_unlock(vlan_lock);
	if (status)
		goto free_fltr_list;

	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
		/* Avoid enabling or disabling VLAN zero twice when in double
		 * VLAN mode
		 */
		if (ice_is_dvm_ena(hw) &&
		    list_itr->fltr_info.l_data.vlan.tpid == 0)
			continue;

		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
		else
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);
		/* -EEXIST means the rule is already present; not an error */
		if (status && status != -EEXIST)
			break;
	}

free_fltr_list:
	/* Entries were allocated by ice_add_to_vsi_fltr_list; release them */
	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
		list_del(&list_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_itr);
	}
	return status;
}

/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct list_head *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct mutex *rule_lock;	/* Lock to protect filter rule list */
	int status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	/* Collect this VSI's rules for the given lookup type under the lock */
	mutex_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	mutex_unlock(rule_lock);
	if (status)
		goto free_fltr_list;

	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_LAST:
	default:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
		break;
	}

free_fltr_list:
	/* Free the temporary list regardless of removal outcome */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}
}

/**
 * ice_remove_vsi_fltr - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 */
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
{
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
}

/**
 * ice_alloc_res_cntr - allocating resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries requested for FD resource type
 * @counter_id: counter index returned by AQ call
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
		   u16 *counter_id)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	u16 buf_len;
	int status;

	/* Allocate resource */
	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->num_elems = cpu_to_le16(num_items);
	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto exit;

	/* Firmware returns the allocated counter index in the response */
	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);

exit:
	kfree(buf);
	return status;
}

/**
 * ice_free_res_cntr - free resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries to be freed for FD resource type
 * @counter_id: counter ID resource which needs to be freed
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
		  u16 counter_id)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	u16 buf_len;
	int status;

	/* Free resource */
	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->num_elems = cpu_to_le16(num_items);
	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);
	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");

	kfree(buf);
	return status;
}

/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_ETYPE_IL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 2, 0 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
	{ ICE_VLAN_EX,		{ 2, 0 } },
	{ ICE_VLAN_IN,		{ 2, 0 } },
};

/* Map each software protocol type to the hardware protocol ID used in the
 * extraction sequence. Several tunnel types share the same HW ID.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_ETYPE_IL,		ICE_ETYPE_IL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_VLAN_EX,		ICE_VLAN_OF_HW },
	{ ICE_VLAN_IN,		ICE_VLAN_OL_HW },
};

/**
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 * @tun_type: type of recipe tunnel
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 */
static u16
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
	      enum ice_sw_tunnel_type tun_type)
{
	bool refresh_required = true;
	struct ice_sw_recipe *recp;
	u8 i;

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,
						&refresh_required))
				continue;

		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)
			continue;

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
			struct ice_fv_word *be = lkup_exts->fv_words;
			u16 *cr = recp[i].lkup_exts.field_mask;
			u16 *de = lkup_exts->field_mask;
			bool found = true;
			u8 pe, qr;

			/* ar, cr, and qr are related to the recipe words, while
			 * be, de, and pe are related to the lookup words
			 */
			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
				     qr++) {
					if (ar[qr].off == be[pe].off &&
					    ar[qr].prot_id == be[pe].prot_id &&
					    cr[qr] == de[pe])
						/* Found the "pe"th word in the
						 * given recipe
						 */
						break;
				}
				/* After walking through all the words in the
				 * "i"th recipe if "p"th word was not found then
				 * this recipe is not what we are looking for.
				 * So break out from this loop and try the next
				 * recipe
				 */
				if (qr >= recp[i].lkup_exts.n_val_words) {
					found = false;
					break;
				}
			}
			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 * Also tun type of recipe needs to be checked
			 */
			if (found && recp[i].tun_type == tun_type)
				return i; /* Return the recipe ID */
		}
	}
	return ICE_MAX_NUM_RECIPES;
}

/**
 * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
 *
 * As protocol id for outer vlan is different in dvm and svm, if dvm is
 * supported protocol array record for outer vlan has to be modified to
 * reflect the value proper for DVM.
 */
void ice_change_proto_id_to_dvm(void)
{
	u8 i;

	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
		if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
		    ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
			ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
}

/**
 * ice_prot_type_to_id - get protocol ID from protocol type
 * @type: protocol type
 * @id: pointer to variable that will receive the ID
 *
 * Returns true if found, false otherwise
 */
static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
{
	u8 i;

	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
		if (ice_prot_id_tbl[i].type == type) {
			*id = ice_prot_id_tbl[i].protocol_id;
			return true;
		}
	return false;
}

/**
 * ice_fill_valid_words - count valid words
 * @rule: advanced rule with lookup information
 * @lkup_exts: byte offset extractions of the words that are valid
 *
 * calculate valid words in a lookup rule using mask value
 *
 * Return: number of words appended to @lkup_exts, or 0 on failure.
 */
static u8
ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
		     struct ice_prot_lkup_ext *lkup_exts)
{
	u8 j, word, prot_id, ret_val;

	if (!ice_prot_type_to_id(rule->type, &prot_id))
		return 0;

	word = lkup_exts->n_val_words;

	/* Walk the rule's mask word-by-word; every nonzero 16-bit mask word
	 * contributes one protocol/offset/mask triple to the extraction list.
	 */
	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
		if (((u16 *)&rule->m_u)[j] &&
		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
			/* No more space to accommodate */
			if (word >= ICE_MAX_CHAIN_WORDS)
				return 0;
			lkup_exts->fv_words[word].off =
				ice_prot_ext[rule->type].offs[j];
			lkup_exts->fv_words[word].prot_id =
				ice_prot_id_tbl[rule->type].protocol_id;
			lkup_exts->field_mask[word] =
				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
			word++;
		}

	/* Number of words added by this rule */
	ret_val = word - lkup_exts->n_val_words;
	lkup_exts->n_val_words = word;

	return ret_val;
}

/**
 * ice_create_first_fit_recp_def - Create a recipe grouping
 * @hw: pointer to the hardware structure
 * @lkup_exts: an array of protocol header extractions
 * @rg_list: pointer to a list that stores new recipe groups
 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
 *
 * Using first fit algorithm, take all the words that are still not done
 * and start grouping them in 4-word groups. Each group makes up one
 * recipe.
 *
 * Return: 0 on success, -ENOMEM if a group entry cannot be allocated.
 */
static int
ice_create_first_fit_recp_def(struct ice_hw *hw,
			      struct ice_prot_lkup_ext *lkup_exts,
			      struct list_head *rg_list,
			      u8 *recp_cnt)
{
	struct ice_pref_recipe_group *grp = NULL;
	u8 j;

	*recp_cnt = 0;

	/* Walk through every word in the rule to check if it is not done. If so
	 * then this word needs to be part of a new recipe.
	 */
	for (j = 0; j < lkup_exts->n_val_words; j++)
		if (!test_bit(j, lkup_exts->done)) {
			/* Start a new group when there is none yet or the
			 * current one is full (ICE_NUM_WORDS_RECIPE words).
			 */
			if (!grp ||
			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
				struct ice_recp_grp_entry *entry;

				entry = devm_kzalloc(ice_hw_to_dev(hw),
						     sizeof(*entry),
						     GFP_KERNEL);
				if (!entry)
					return -ENOMEM;
				list_add(&entry->l_entry, rg_list);
				grp = &entry->r_group;
				(*recp_cnt)++;
			}

			grp->pairs[grp->n_val_pairs].prot_id =
				lkup_exts->fv_words[j].prot_id;
			grp->pairs[grp->n_val_pairs].off =
				lkup_exts->fv_words[j].off;
			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
			grp->n_val_pairs++;
		}

	return 0;
}

/**
 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
 * @hw: pointer to the hardware structure
 * @fv_list: field vector with the extraction sequence information
 * @rg_list: recipe groupings with protocol-offset pairs
 *
 * Helper function to fill in the field vector indices for protocol-offset
 * pairs. These indexes are then ultimately programmed into a recipe.
 */
static int
ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
		       struct list_head *rg_list)
{
	struct ice_sw_fv_list_entry *fv;
	struct ice_recp_grp_entry *rg;
	struct ice_fv_word *fv_ext;

	if (list_empty(fv_list))
		return 0;

	/* Use the extraction sequence of the first field vector in the list */
	fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
			      list_entry);
	fv_ext = fv->fv_ptr->ew;

	list_for_each_entry(rg, rg_list, l_entry) {
		u8 i;

		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
			struct ice_fv_word *pr;
			bool found = false;
			u16 mask;
			u8 j;

			pr = &rg->r_group.pairs[i];
			mask = rg->r_group.mask[i];

			/* Scan the extraction sequence for a word with the
			 * same protocol ID and offset.
			 */
			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
				if (fv_ext[j].prot_id == pr->prot_id &&
				    fv_ext[j].off == pr->off) {
					found = true;

					/* Store index of field vector */
					rg->fv_idx[i] = j;
					rg->fv_mask[i] = mask;
					break;
				}

			/* Protocol/offset could not be found, caller gave an
			 * invalid pair
			 */
			if (!found)
				return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_find_free_recp_res_idx - find free result indexes for recipe
 * @hw: pointer to hardware structure
 * @profiles: bitmap of profiles that will be associated with the new recipe
 * @free_idx: pointer to variable to receive the free index bitmap
 *
 * The algorithm used here is:
 * 1. When creating a new recipe, create a set P which contains all
 *    Profiles that will be associated with our new recipe
 *
 * 2. For each Profile p in set P:
 *    a. Add all recipes associated with Profile p into set R
 *    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
 *	[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
 *	i. Or just assume they all have the same possible indexes:
 *	   44, 45, 46, 47
 *	   i.e., PossibleIndexes = 0x0000F00000000000
 *
 * 3. For each Recipe r in set R:
 *    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
 *    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
 *
 * FreeIndexes will contain the bits indicating the indexes free for use,
 * then the code needs to update the recipe[r].used_result_idx_bits to
 * indicate which indexes were selected for use by this recipe.
 *
 * Return: number of free result indexes found in @free_idx.
 */
static u16
ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
			   unsigned long *free_idx)
{
	DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
	DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
	DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
	u16 bit;

	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);

	bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);

	/* For each profile we are going to associate the recipe with, add the
	 * recipes that are associated with that profile. This will give us
	 * the set of recipes that our recipe may collide with. Also, determine
	 * what possible result indexes are usable given this set of profiles.
	 */
	for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
		bitmap_or(recipes, recipes, profile_to_recipe[bit],
			  ICE_MAX_NUM_RECIPES);
		bitmap_and(possible_idx, possible_idx,
			   hw->switch_info->prof_res_bm[bit],
			   ICE_MAX_FV_WORDS);
	}

	/* For each recipe that our new recipe may collide with, determine
	 * which indexes have been used.
	 */
	for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
		bitmap_or(used_idx, used_idx,
			  hw->switch_info->recp_list[bit].res_idxs,
			  ICE_MAX_FV_WORDS);

	/* used_idx is a subset of possible_idx, so XOR yields the free set */
	bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);

	/* return number of free indexes */
	return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
}

/**
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @profiles: bitmap of profiles that will be associated.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
static int
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  unsigned long *profiles)
{
	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	u16 free_res_idx;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;
	int status;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	/* check number of free result indices */
	bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		if (rm->n_grp_count > free_res_idx)
			return -ENOSPC;

		/* account for the extra chaining recipe */
		rm->n_grp_count++;
	}

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
		return -ENOSPC;

	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* buf lives on past this function as rm->root_buf, hence devm */
	buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
			   GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err_mem;
	}

	bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
				   NULL);
	if (status || recipe_count == 0)
		goto err_unroll;

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		u8 i;

		status = ice_alloc_recipe(hw, &entry->rid);
		if (status)
			goto err_unroll;

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		buf[recps].content.rid = 0;
		memset(&buf[recps].content.lkup_indx, 0,
		       sizeof(buf[recps].content.lkup_indx));

		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 * to be 0
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;
		}

		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] =
				cpu_to_le16(entry->fv_mask[i]);
		}

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			 * that can be used.
			 */
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
				status = -ENOSPC;
				goto err_unroll;
			}

			/* Consume one result index per non-root recipe so the
			 * chaining recipe can match on it.
			 */
			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			clear_bit(chain_idx, result_idx_bm);
			chain_idx = find_first_bit(result_idx_bm,
						   ICE_MAX_FV_WORDS);
		}

		/* fill recipe dependencies */
		bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
			    ICE_MAX_NUM_RECIPES);
		set_bit(buf[recps].recipe_indx,
			(unsigned long *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
		recps++;
	}

	if (rm->n_grp_count == 1) {
		rm->root_rid = buf[0].recipe_indx;
		set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[0].recipe_bitmap));
		} else {
			status = -EINVAL;
			goto err_unroll;
		}
		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referreing newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
	} else {
		struct ice_recp_grp_entry *last_chain_entry;
		u16 rid, i;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);
		if (status)
			goto err_unroll;

		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*last_chain_entry),
						GFP_KERNEL);
		if (!last_chain_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}
		last_chain_entry->rid = rid;
		memset(&buf[recps].content.lkup_indx, 0,
		       sizeof(buf[recps].content.lkup_indx));
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;
		}

		i = 1;
		/* update r_bitmap with the recp that is used for chaining */
		set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		list_for_each_entry(entry, &rm->rg_list, l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
			set_bit(entry->rid, rm->r_bitmap);
		}
		list_add(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[recps].recipe_bitmap));
		} else {
			status = -EINVAL;
			goto err_unroll;
		}
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

		recps++;
		rm->root_rid = (u8)rid;
	}
	/* Recipe programming requires the global change lock */
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		goto err_unroll;

	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);
	if (status)
		goto err_unroll;

	/* Every recipe that just got created add it to the recipe
	 * book keeping list
	 */
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {
				buf_idx = idx;
				idx_found = true;
			}

		if (!idx_found) {
			status = -EIO;
			goto err_unroll;
		}

		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;

		recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);

		memcpy(&recp->ext_words, entry->r_group.pairs,
		       entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));

		memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
		       sizeof(recp->r_bitmap));

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
		if (!is_root)
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->recp_created = true;
	}
	/* Ownership of buf transfers to rm on success */
	rm->root_buf = buf;
	kfree(tmp);
	return status;

err_unroll:
err_mem:
	kfree(tmp);
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_create_recipe_group - creates recipe group
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @lkup_exts: lookup elements
 */
static int
ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
			struct ice_prot_lkup_ext *lkup_exts)
{
	u8 recp_count = 0;
	int status;

	rm->n_grp_count = 0;

	/* Create recipes for words that are marked not done by packing them
	 * as best fit.
	 */
	status = ice_create_first_fit_recp_def(hw, lkup_exts,
					       &rm->rg_list, &recp_count);
	if (!status) {
		rm->n_grp_count += recp_count;
		rm->n_ext_words = lkup_exts->n_val_words;
		memcpy(&rm->ext_words, lkup_exts->fv_words,
		       sizeof(rm->ext_words));
		memcpy(rm->word_masks, lkup_exts->field_mask,
		       sizeof(rm->word_masks));
	}

	return status;
}

/**
 * ice_tun_type_match_word - determine if tun type needs a match mask
 * @tun_type: tunnel type
 * @mask: mask to be used for the tunnel
 *
 * Return: true and set @mask to ICE_TUN_FLAG_MASK for tunnel types that
 * require a metadata flag match; false (mask 0) otherwise.
 */
static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
{
	switch (tun_type) {
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_VXLAN:
	case ICE_SW_TUN_NVGRE:
	case ICE_SW_TUN_GTPU:
	case ICE_SW_TUN_GTPC:
		*mask = ICE_TUN_FLAG_MASK;
		return true;

	default:
		*mask = 0;
		return false;
	}
}

/**
 * ice_add_special_words - Add words that are not protocols, such as metadata
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @lkup_exts: lookup word structure
 * @dvm_ena: is double VLAN mode enabled
 *
 * Return: 0 on success, -ENOSPC when no lookup word slot is left.
 */
static int
ice_add_special_words(struct ice_adv_rule_info *rinfo,
		      struct ice_prot_lkup_ext *lkup_exts, bool dvm_ena)
{
	u16 mask;

	/* If this is a tunneled packet, then add recipe index to match the
	 * tunnel bit in the packet metadata flags.
	 */
	if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
		/* each metadata match consumes one chain word slot */
		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
			u8 word = lkup_exts->n_val_words++;

			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
			lkup_exts->field_mask[word] = mask;
		} else {
			return -ENOSPC;
		}
	}

	/* In double VLAN mode, a VLAN-type match also needs a metadata word */
	if (rinfo->vlan_type != 0 && dvm_ena) {
		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
			u8 word = lkup_exts->n_val_words++;

			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
			lkup_exts->fv_words[word].off = ICE_VLAN_FLAG_MDID_OFF;
			lkup_exts->field_mask[word] =
				ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK;
		} else {
			return -ENOSPC;
		}
	}

	return 0;
}

/* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
 * @hw: pointer to hardware structure
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @bm: pointer to memory for returning the bitmap of field vectors
 */
static void
ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
			 unsigned long *bm)
{
	enum ice_prof_type prof_type;

	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);

	/* Map the rule's tunnel type to the profile class whose field
	 * vectors can satisfy it.
	 */
	switch (rinfo->tun_type) {
	case ICE_NON_TUN:
		prof_type = ICE_PROF_NON_TUN;
		break;
	case ICE_ALL_TUNNELS:
		prof_type = ICE_PROF_TUN_ALL;
		break;
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_VXLAN:
		prof_type = ICE_PROF_TUN_UDP;
		break;
	case ICE_SW_TUN_NVGRE:
		prof_type = ICE_PROF_TUN_GRE;
		break;
	case ICE_SW_TUN_GTPU:
		prof_type = ICE_PROF_TUN_GTPU;
		break;
	case ICE_SW_TUN_GTPC:
		prof_type = ICE_PROF_TUN_GTPC;
		break;
	case ICE_SW_TUN_AND_NON_TUN:
	default:
		prof_type = ICE_PROF_ALL;
		break;
	}

	ice_get_sw_fv_bitmap(hw, prof_type, bm);
}

/**
5284 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default 5285 * @hw: pointer to hardware structure 5286 * @lkups: lookup elements or match criteria for the advanced recipe, one 5287 * structure per protocol header 5288 * @lkups_cnt: number of protocols 5289 * @rinfo: other information regarding the rule e.g. priority and action info 5290 * @rid: return the recipe ID of the recipe created 5291 */ 5292 static int 5293 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, 5294 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid) 5295 { 5296 DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES); 5297 DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES); 5298 struct ice_prot_lkup_ext *lkup_exts; 5299 struct ice_recp_grp_entry *r_entry; 5300 struct ice_sw_fv_list_entry *fvit; 5301 struct ice_recp_grp_entry *r_tmp; 5302 struct ice_sw_fv_list_entry *tmp; 5303 struct ice_sw_recipe *rm; 5304 int status = 0; 5305 u8 i; 5306 5307 if (!lkups_cnt) 5308 return -EINVAL; 5309 5310 lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL); 5311 if (!lkup_exts) 5312 return -ENOMEM; 5313 5314 /* Determine the number of words to be matched and if it exceeds a 5315 * recipe's restrictions 5316 */ 5317 for (i = 0; i < lkups_cnt; i++) { 5318 u16 count; 5319 5320 if (lkups[i].type >= ICE_PROTOCOL_LAST) { 5321 status = -EIO; 5322 goto err_free_lkup_exts; 5323 } 5324 5325 count = ice_fill_valid_words(&lkups[i], lkup_exts); 5326 if (!count) { 5327 status = -EIO; 5328 goto err_free_lkup_exts; 5329 } 5330 } 5331 5332 rm = kzalloc(sizeof(*rm), GFP_KERNEL); 5333 if (!rm) { 5334 status = -ENOMEM; 5335 goto err_free_lkup_exts; 5336 } 5337 5338 /* Get field vectors that contain fields extracted from all the protocol 5339 * headers being programmed. 
5340 */ 5341 INIT_LIST_HEAD(&rm->fv_list); 5342 INIT_LIST_HEAD(&rm->rg_list); 5343 5344 /* Get bitmap of field vectors (profiles) that are compatible with the 5345 * rule request; only these will be searched in the subsequent call to 5346 * ice_get_sw_fv_list. 5347 */ 5348 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap); 5349 5350 status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list); 5351 if (status) 5352 goto err_unroll; 5353 5354 /* Create any special protocol/offset pairs, such as looking at tunnel 5355 * bits by extracting metadata 5356 */ 5357 status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw)); 5358 if (status) 5359 goto err_free_lkup_exts; 5360 5361 /* Group match words into recipes using preferred recipe grouping 5362 * criteria. 5363 */ 5364 status = ice_create_recipe_group(hw, rm, lkup_exts); 5365 if (status) 5366 goto err_unroll; 5367 5368 /* set the recipe priority if specified */ 5369 rm->priority = (u8)rinfo->priority; 5370 5371 /* Find offsets from the field vector. Pick the first one for all the 5372 * recipes. 
5373 */ 5374 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list); 5375 if (status) 5376 goto err_unroll; 5377 5378 /* get bitmap of all profiles the recipe will be associated with */ 5379 bitmap_zero(profiles, ICE_MAX_NUM_PROFILES); 5380 list_for_each_entry(fvit, &rm->fv_list, list_entry) { 5381 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id); 5382 set_bit((u16)fvit->profile_id, profiles); 5383 } 5384 5385 /* Look for a recipe which matches our requested fv / mask list */ 5386 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type); 5387 if (*rid < ICE_MAX_NUM_RECIPES) 5388 /* Success if found a recipe that match the existing criteria */ 5389 goto err_unroll; 5390 5391 rm->tun_type = rinfo->tun_type; 5392 /* Recipe we need does not exist, add a recipe */ 5393 status = ice_add_sw_recipe(hw, rm, profiles); 5394 if (status) 5395 goto err_unroll; 5396 5397 /* Associate all the recipes created with all the profiles in the 5398 * common field vector. 5399 */ 5400 list_for_each_entry(fvit, &rm->fv_list, list_entry) { 5401 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); 5402 u16 j; 5403 5404 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id, 5405 (u8 *)r_bitmap, NULL); 5406 if (status) 5407 goto err_unroll; 5408 5409 bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap, 5410 ICE_MAX_NUM_RECIPES); 5411 status = ice_acquire_change_lock(hw, ICE_RES_WRITE); 5412 if (status) 5413 goto err_unroll; 5414 5415 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id, 5416 (u8 *)r_bitmap, 5417 NULL); 5418 ice_release_change_lock(hw); 5419 5420 if (status) 5421 goto err_unroll; 5422 5423 /* Update profile to recipe bitmap array */ 5424 bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap, 5425 ICE_MAX_NUM_RECIPES); 5426 5427 /* Update recipe to profile bitmap array */ 5428 for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES) 5429 set_bit((u16)fvit->profile_id, recipe_to_profile[j]); 5430 } 5431 5432 *rid = rm->root_rid; 5433 
memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts, 5434 sizeof(*lkup_exts)); 5435 err_unroll: 5436 list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) { 5437 list_del(&r_entry->l_entry); 5438 devm_kfree(ice_hw_to_dev(hw), r_entry); 5439 } 5440 5441 list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) { 5442 list_del(&fvit->list_entry); 5443 devm_kfree(ice_hw_to_dev(hw), fvit); 5444 } 5445 5446 if (rm->root_buf) 5447 devm_kfree(ice_hw_to_dev(hw), rm->root_buf); 5448 5449 kfree(rm); 5450 5451 err_free_lkup_exts: 5452 kfree(lkup_exts); 5453 5454 return status; 5455 } 5456 5457 /** 5458 * ice_dummy_packet_add_vlan - insert VLAN header to dummy pkt 5459 * 5460 * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added 5461 * @num_vlan: number of VLAN tags 5462 */ 5463 static struct ice_dummy_pkt_profile * 5464 ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt, 5465 u32 num_vlan) 5466 { 5467 struct ice_dummy_pkt_profile *profile; 5468 struct ice_dummy_pkt_offsets *offsets; 5469 u32 buf_len, off, etype_off, i; 5470 u8 *pkt; 5471 5472 if (num_vlan < 1 || num_vlan > 2) 5473 return ERR_PTR(-EINVAL); 5474 5475 off = num_vlan * VLAN_HLEN; 5476 5477 buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) + 5478 dummy_pkt->offsets_len; 5479 offsets = kzalloc(buf_len, GFP_KERNEL); 5480 if (!offsets) 5481 return ERR_PTR(-ENOMEM); 5482 5483 offsets[0] = dummy_pkt->offsets[0]; 5484 if (num_vlan == 2) { 5485 offsets[1] = ice_dummy_qinq_packet_offsets[0]; 5486 offsets[2] = ice_dummy_qinq_packet_offsets[1]; 5487 } else if (num_vlan == 1) { 5488 offsets[1] = ice_dummy_vlan_packet_offsets[0]; 5489 } 5490 5491 for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) { 5492 offsets[i + num_vlan].type = dummy_pkt->offsets[i].type; 5493 offsets[i + num_vlan].offset = 5494 dummy_pkt->offsets[i].offset + off; 5495 } 5496 offsets[i + num_vlan] = dummy_pkt->offsets[i]; 5497 5498 etype_off = 
dummy_pkt->offsets[1].offset; 5499 5500 buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) + 5501 dummy_pkt->pkt_len; 5502 pkt = kzalloc(buf_len, GFP_KERNEL); 5503 if (!pkt) { 5504 kfree(offsets); 5505 return ERR_PTR(-ENOMEM); 5506 } 5507 5508 memcpy(pkt, dummy_pkt->pkt, etype_off); 5509 memcpy(pkt + etype_off, 5510 num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet, 5511 off); 5512 memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off, 5513 dummy_pkt->pkt_len - etype_off); 5514 5515 profile = kzalloc(sizeof(*profile), GFP_KERNEL); 5516 if (!profile) { 5517 kfree(offsets); 5518 kfree(pkt); 5519 return ERR_PTR(-ENOMEM); 5520 } 5521 5522 profile->offsets = offsets; 5523 profile->pkt = pkt; 5524 profile->pkt_len = buf_len; 5525 profile->match |= ICE_PKT_KMALLOC; 5526 5527 return profile; 5528 } 5529 5530 /** 5531 * ice_find_dummy_packet - find dummy packet 5532 * 5533 * @lkups: lookup elements or match criteria for the advanced recipe, one 5534 * structure per protocol header 5535 * @lkups_cnt: number of protocols 5536 * @tun_type: tunnel type 5537 * 5538 * Returns the &ice_dummy_pkt_profile corresponding to these lookup params. 
5539 */ 5540 static const struct ice_dummy_pkt_profile * 5541 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, 5542 enum ice_sw_tunnel_type tun_type) 5543 { 5544 const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles; 5545 u32 match = 0, vlan_count = 0; 5546 u16 i; 5547 5548 switch (tun_type) { 5549 case ICE_SW_TUN_GTPC: 5550 match |= ICE_PKT_TUN_GTPC; 5551 break; 5552 case ICE_SW_TUN_GTPU: 5553 match |= ICE_PKT_TUN_GTPU; 5554 break; 5555 case ICE_SW_TUN_NVGRE: 5556 match |= ICE_PKT_TUN_NVGRE; 5557 break; 5558 case ICE_SW_TUN_GENEVE: 5559 case ICE_SW_TUN_VXLAN: 5560 match |= ICE_PKT_TUN_UDP; 5561 break; 5562 default: 5563 break; 5564 } 5565 5566 for (i = 0; i < lkups_cnt; i++) { 5567 if (lkups[i].type == ICE_UDP_ILOS) 5568 match |= ICE_PKT_INNER_UDP; 5569 else if (lkups[i].type == ICE_TCP_IL) 5570 match |= ICE_PKT_INNER_TCP; 5571 else if (lkups[i].type == ICE_IPV6_OFOS) 5572 match |= ICE_PKT_OUTER_IPV6; 5573 else if (lkups[i].type == ICE_VLAN_OFOS || 5574 lkups[i].type == ICE_VLAN_EX) 5575 vlan_count++; 5576 else if (lkups[i].type == ICE_VLAN_IN) 5577 vlan_count++; 5578 else if (lkups[i].type == ICE_ETYPE_OL && 5579 lkups[i].h_u.ethertype.ethtype_id == 5580 cpu_to_be16(ICE_IPV6_ETHER_ID) && 5581 lkups[i].m_u.ethertype.ethtype_id == 5582 cpu_to_be16(0xFFFF)) 5583 match |= ICE_PKT_OUTER_IPV6; 5584 else if (lkups[i].type == ICE_ETYPE_IL && 5585 lkups[i].h_u.ethertype.ethtype_id == 5586 cpu_to_be16(ICE_IPV6_ETHER_ID) && 5587 lkups[i].m_u.ethertype.ethtype_id == 5588 cpu_to_be16(0xFFFF)) 5589 match |= ICE_PKT_INNER_IPV6; 5590 else if (lkups[i].type == ICE_IPV6_IL) 5591 match |= ICE_PKT_INNER_IPV6; 5592 else if (lkups[i].type == ICE_GTP_NO_PAY) 5593 match |= ICE_PKT_GTP_NOPAY; 5594 else if (lkups[i].type == ICE_PPPOE) { 5595 match |= ICE_PKT_PPPOE; 5596 if (lkups[i].h_u.pppoe_hdr.ppp_prot_id == 5597 htons(PPP_IPV6)) 5598 match |= ICE_PKT_OUTER_IPV6; 5599 } 5600 } 5601 5602 while (ret->match && (match & ret->match) != ret->match) 5603 ret++; 
5604 5605 if (vlan_count != 0) 5606 ret = ice_dummy_packet_add_vlan(ret, vlan_count); 5607 5608 return ret; 5609 } 5610 5611 /** 5612 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria 5613 * 5614 * @lkups: lookup elements or match criteria for the advanced recipe, one 5615 * structure per protocol header 5616 * @lkups_cnt: number of protocols 5617 * @s_rule: stores rule information from the match criteria 5618 * @profile: dummy packet profile (the template, its size and header offsets) 5619 */ 5620 static int 5621 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, 5622 struct ice_sw_rule_lkup_rx_tx *s_rule, 5623 const struct ice_dummy_pkt_profile *profile) 5624 { 5625 u8 *pkt; 5626 u16 i; 5627 5628 /* Start with a packet with a pre-defined/dummy content. Then, fill 5629 * in the header values to be looked up or matched. 5630 */ 5631 pkt = s_rule->hdr_data; 5632 5633 memcpy(pkt, profile->pkt, profile->pkt_len); 5634 5635 for (i = 0; i < lkups_cnt; i++) { 5636 const struct ice_dummy_pkt_offsets *offsets = profile->offsets; 5637 enum ice_protocol_type type; 5638 u16 offset = 0, len = 0, j; 5639 bool found = false; 5640 5641 /* find the start of this layer; it should be found since this 5642 * was already checked when search for the dummy packet 5643 */ 5644 type = lkups[i].type; 5645 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) { 5646 if (type == offsets[j].type) { 5647 offset = offsets[j].offset; 5648 found = true; 5649 break; 5650 } 5651 } 5652 /* this should never happen in a correct calling sequence */ 5653 if (!found) 5654 return -EINVAL; 5655 5656 switch (lkups[i].type) { 5657 case ICE_MAC_OFOS: 5658 case ICE_MAC_IL: 5659 len = sizeof(struct ice_ether_hdr); 5660 break; 5661 case ICE_ETYPE_OL: 5662 case ICE_ETYPE_IL: 5663 len = sizeof(struct ice_ethtype_hdr); 5664 break; 5665 case ICE_VLAN_OFOS: 5666 case ICE_VLAN_EX: 5667 case ICE_VLAN_IN: 5668 len = sizeof(struct ice_vlan_hdr); 5669 break; 5670 case 
ICE_IPV4_OFOS: 5671 case ICE_IPV4_IL: 5672 len = sizeof(struct ice_ipv4_hdr); 5673 break; 5674 case ICE_IPV6_OFOS: 5675 case ICE_IPV6_IL: 5676 len = sizeof(struct ice_ipv6_hdr); 5677 break; 5678 case ICE_TCP_IL: 5679 case ICE_UDP_OF: 5680 case ICE_UDP_ILOS: 5681 len = sizeof(struct ice_l4_hdr); 5682 break; 5683 case ICE_SCTP_IL: 5684 len = sizeof(struct ice_sctp_hdr); 5685 break; 5686 case ICE_NVGRE: 5687 len = sizeof(struct ice_nvgre_hdr); 5688 break; 5689 case ICE_VXLAN: 5690 case ICE_GENEVE: 5691 len = sizeof(struct ice_udp_tnl_hdr); 5692 break; 5693 case ICE_GTP_NO_PAY: 5694 case ICE_GTP: 5695 len = sizeof(struct ice_udp_gtp_hdr); 5696 break; 5697 case ICE_PPPOE: 5698 len = sizeof(struct ice_pppoe_hdr); 5699 break; 5700 default: 5701 return -EINVAL; 5702 } 5703 5704 /* the length should be a word multiple */ 5705 if (len % ICE_BYTES_PER_WORD) 5706 return -EIO; 5707 5708 /* We have the offset to the header start, the length, the 5709 * caller's header values and mask. Use this information to 5710 * copy the data into the dummy packet appropriately based on 5711 * the mask. Note that we need to only write the bits as 5712 * indicated by the mask to make sure we don't improperly write 5713 * over any significant packet data. 
5714 */ 5715 for (j = 0; j < len / sizeof(u16); j++) { 5716 u16 *ptr = (u16 *)(pkt + offset); 5717 u16 mask = lkups[i].m_raw[j]; 5718 5719 if (!mask) 5720 continue; 5721 5722 ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask); 5723 } 5724 } 5725 5726 s_rule->hdr_len = cpu_to_le16(profile->pkt_len); 5727 5728 return 0; 5729 } 5730 5731 /** 5732 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port 5733 * @hw: pointer to the hardware structure 5734 * @tun_type: tunnel type 5735 * @pkt: dummy packet to fill in 5736 * @offsets: offset info for the dummy packet 5737 */ 5738 static int 5739 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, 5740 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets) 5741 { 5742 u16 open_port, i; 5743 5744 switch (tun_type) { 5745 case ICE_SW_TUN_VXLAN: 5746 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN)) 5747 return -EIO; 5748 break; 5749 case ICE_SW_TUN_GENEVE: 5750 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE)) 5751 return -EIO; 5752 break; 5753 default: 5754 /* Nothing needs to be done for this tunnel type */ 5755 return 0; 5756 } 5757 5758 /* Find the outer UDP protocol header and insert the port number */ 5759 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) { 5760 if (offsets[i].type == ICE_UDP_OF) { 5761 struct ice_l4_hdr *hdr; 5762 u16 offset; 5763 5764 offset = offsets[i].offset; 5765 hdr = (struct ice_l4_hdr *)&pkt[offset]; 5766 hdr->dst_port = cpu_to_be16(open_port); 5767 5768 return 0; 5769 } 5770 } 5771 5772 return -EIO; 5773 } 5774 5775 /** 5776 * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type 5777 * @vlan_type: VLAN tag type 5778 * @pkt: dummy packet to fill in 5779 * @offsets: offset info for the dummy packet 5780 */ 5781 static int 5782 ice_fill_adv_packet_vlan(u16 vlan_type, u8 *pkt, 5783 const struct ice_dummy_pkt_offsets *offsets) 5784 { 5785 u16 i; 5786 5787 /* Find VLAN header and insert VLAN TPID */ 5788 for (i = 0; 
offsets[i].type != ICE_PROTOCOL_LAST; i++) { 5789 if (offsets[i].type == ICE_VLAN_OFOS || 5790 offsets[i].type == ICE_VLAN_EX) { 5791 struct ice_vlan_hdr *hdr; 5792 u16 offset; 5793 5794 offset = offsets[i].offset; 5795 hdr = (struct ice_vlan_hdr *)&pkt[offset]; 5796 hdr->type = cpu_to_be16(vlan_type); 5797 5798 return 0; 5799 } 5800 } 5801 5802 return -EIO; 5803 } 5804 5805 /** 5806 * ice_find_adv_rule_entry - Search a rule entry 5807 * @hw: pointer to the hardware structure 5808 * @lkups: lookup elements or match criteria for the advanced recipe, one 5809 * structure per protocol header 5810 * @lkups_cnt: number of protocols 5811 * @recp_id: recipe ID for which we are finding the rule 5812 * @rinfo: other information regarding the rule e.g. priority and action info 5813 * 5814 * Helper function to search for a given advance rule entry 5815 * Returns pointer to entry storing the rule if found 5816 */ 5817 static struct ice_adv_fltr_mgmt_list_entry * 5818 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, 5819 u16 lkups_cnt, u16 recp_id, 5820 struct ice_adv_rule_info *rinfo) 5821 { 5822 struct ice_adv_fltr_mgmt_list_entry *list_itr; 5823 struct ice_switch_info *sw = hw->switch_info; 5824 int i; 5825 5826 list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules, 5827 list_entry) { 5828 bool lkups_matched = true; 5829 5830 if (lkups_cnt != list_itr->lkups_cnt) 5831 continue; 5832 for (i = 0; i < list_itr->lkups_cnt; i++) 5833 if (memcmp(&list_itr->lkups[i], &lkups[i], 5834 sizeof(*lkups))) { 5835 lkups_matched = false; 5836 break; 5837 } 5838 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag && 5839 rinfo->tun_type == list_itr->rule_info.tun_type && 5840 rinfo->vlan_type == list_itr->rule_info.vlan_type && 5841 lkups_matched) 5842 return list_itr; 5843 } 5844 return NULL; 5845 } 5846 5847 /** 5848 * ice_adv_add_update_vsi_list 5849 * @hw: pointer to the hardware structure 5850 * @m_entry: pointer to current adv filter 
management list entry 5851 * @cur_fltr: filter information from the book keeping entry 5852 * @new_fltr: filter information with the new VSI to be added 5853 * 5854 * Call AQ command to add or update previously created VSI list with new VSI. 5855 * 5856 * Helper function to do book keeping associated with adding filter information 5857 * The algorithm to do the booking keeping is described below : 5858 * When a VSI needs to subscribe to a given advanced filter 5859 * if only one VSI has been added till now 5860 * Allocate a new VSI list and add two VSIs 5861 * to this list using switch rule command 5862 * Update the previously created switch rule with the 5863 * newly created VSI list ID 5864 * if a VSI list was previously created 5865 * Add the new VSI to the previously created VSI list set 5866 * using the update switch rule command 5867 */ 5868 static int 5869 ice_adv_add_update_vsi_list(struct ice_hw *hw, 5870 struct ice_adv_fltr_mgmt_list_entry *m_entry, 5871 struct ice_adv_rule_info *cur_fltr, 5872 struct ice_adv_rule_info *new_fltr) 5873 { 5874 u16 vsi_list_id = 0; 5875 int status; 5876 5877 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || 5878 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP || 5879 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET) 5880 return -EOPNOTSUPP; 5881 5882 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || 5883 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) && 5884 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI || 5885 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)) 5886 return -EOPNOTSUPP; 5887 5888 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { 5889 /* Only one entry existed in the mapping and it was not already 5890 * a part of a VSI list. So, create a VSI list with the old and 5891 * new VSIs. 
5892 */ 5893 struct ice_fltr_info tmp_fltr; 5894 u16 vsi_handle_arr[2]; 5895 5896 /* A rule already exists with the new VSI being added */ 5897 if (cur_fltr->sw_act.fwd_id.hw_vsi_id == 5898 new_fltr->sw_act.fwd_id.hw_vsi_id) 5899 return -EEXIST; 5900 5901 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle; 5902 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle; 5903 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, 5904 &vsi_list_id, 5905 ICE_SW_LKUP_LAST); 5906 if (status) 5907 return status; 5908 5909 memset(&tmp_fltr, 0, sizeof(tmp_fltr)); 5910 tmp_fltr.flag = m_entry->rule_info.sw_act.flag; 5911 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id; 5912 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; 5913 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; 5914 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST; 5915 5916 /* Update the previous switch rule of "forward to VSI" to 5917 * "fwd to VSI list" 5918 */ 5919 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); 5920 if (status) 5921 return status; 5922 5923 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id; 5924 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST; 5925 m_entry->vsi_list_info = 5926 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, 5927 vsi_list_id); 5928 } else { 5929 u16 vsi_handle = new_fltr->sw_act.vsi_handle; 5930 5931 if (!m_entry->vsi_list_info) 5932 return -EIO; 5933 5934 /* A rule already exists with the new VSI being added */ 5935 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) 5936 return 0; 5937 5938 /* Update the previously created VSI list set with 5939 * the new VSI ID passed in 5940 */ 5941 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id; 5942 5943 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, 5944 vsi_list_id, false, 5945 ice_aqc_opc_update_sw_rules, 5946 ICE_SW_LKUP_LAST); 5947 /* update VSI list mapping info with new VSI ID */ 5948 if (!status) 5949 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map); 5950 } 5951 if (!status) 5952 m_entry->vsi_count++; 5953 return status; 5954 } 5955 
5956 /** 5957 * ice_add_adv_rule - helper function to create an advanced switch rule 5958 * @hw: pointer to the hardware structure 5959 * @lkups: information on the words that needs to be looked up. All words 5960 * together makes one recipe 5961 * @lkups_cnt: num of entries in the lkups array 5962 * @rinfo: other information related to the rule that needs to be programmed 5963 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be 5964 * ignored is case of error. 5965 * 5966 * This function can program only 1 rule at a time. The lkups is used to 5967 * describe the all the words that forms the "lookup" portion of the recipe. 5968 * These words can span multiple protocols. Callers to this function need to 5969 * pass in a list of protocol headers with lookup information along and mask 5970 * that determines which words are valid from the given protocol header. 5971 * rinfo describes other information related to this rule such as forwarding 5972 * IDs, priority of this rule, etc. 
5973 */ 5974 int 5975 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, 5976 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, 5977 struct ice_rule_query_data *added_entry) 5978 { 5979 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL; 5980 struct ice_sw_rule_lkup_rx_tx *s_rule = NULL; 5981 const struct ice_dummy_pkt_profile *profile; 5982 u16 rid = 0, i, rule_buf_sz, vsi_handle; 5983 struct list_head *rule_head; 5984 struct ice_switch_info *sw; 5985 u16 word_cnt; 5986 u32 act = 0; 5987 int status; 5988 u8 q_rgn; 5989 5990 /* Initialize profile to result index bitmap */ 5991 if (!hw->switch_info->prof_res_bm_init) { 5992 hw->switch_info->prof_res_bm_init = 1; 5993 ice_init_prof_result_bm(hw); 5994 } 5995 5996 if (!lkups_cnt) 5997 return -EINVAL; 5998 5999 /* get # of words we need to match */ 6000 word_cnt = 0; 6001 for (i = 0; i < lkups_cnt; i++) { 6002 u16 j; 6003 6004 for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++) 6005 if (lkups[i].m_raw[j]) 6006 word_cnt++; 6007 } 6008 6009 if (!word_cnt) 6010 return -EINVAL; 6011 6012 if (word_cnt > ICE_MAX_CHAIN_WORDS) 6013 return -ENOSPC; 6014 6015 /* locate a dummy packet */ 6016 profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type); 6017 if (IS_ERR(profile)) 6018 return PTR_ERR(profile); 6019 6020 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI || 6021 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q || 6022 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP || 6023 rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) { 6024 status = -EIO; 6025 goto free_pkt_profile; 6026 } 6027 6028 vsi_handle = rinfo->sw_act.vsi_handle; 6029 if (!ice_is_vsi_valid(hw, vsi_handle)) { 6030 status = -EINVAL; 6031 goto free_pkt_profile; 6032 } 6033 6034 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) 6035 rinfo->sw_act.fwd_id.hw_vsi_id = 6036 ice_get_hw_vsi_num(hw, vsi_handle); 6037 if (rinfo->sw_act.flag & ICE_FLTR_TX) 6038 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle); 6039 6040 status = ice_add_adv_recipe(hw, lkups, 
lkups_cnt, rinfo, &rid); 6041 if (status) 6042 goto free_pkt_profile; 6043 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo); 6044 if (m_entry) { 6045 /* we have to add VSI to VSI_LIST and increment vsi_count. 6046 * Also Update VSI list so that we can change forwarding rule 6047 * if the rule already exists, we will check if it exists with 6048 * same vsi_id, if not then add it to the VSI list if it already 6049 * exists if not then create a VSI list and add the existing VSI 6050 * ID and the new VSI ID to the list 6051 * We will add that VSI to the list 6052 */ 6053 status = ice_adv_add_update_vsi_list(hw, m_entry, 6054 &m_entry->rule_info, 6055 rinfo); 6056 if (added_entry) { 6057 added_entry->rid = rid; 6058 added_entry->rule_id = m_entry->rule_info.fltr_rule_id; 6059 added_entry->vsi_handle = rinfo->sw_act.vsi_handle; 6060 } 6061 goto free_pkt_profile; 6062 } 6063 rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len); 6064 s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); 6065 if (!s_rule) { 6066 status = -ENOMEM; 6067 goto free_pkt_profile; 6068 } 6069 if (!rinfo->flags_info.act_valid) { 6070 act |= ICE_SINGLE_ACT_LAN_ENABLE; 6071 act |= ICE_SINGLE_ACT_LB_ENABLE; 6072 } else { 6073 act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE | 6074 ICE_SINGLE_ACT_LB_ENABLE); 6075 } 6076 6077 switch (rinfo->sw_act.fltr_act) { 6078 case ICE_FWD_TO_VSI: 6079 act |= (rinfo->sw_act.fwd_id.hw_vsi_id << 6080 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M; 6081 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; 6082 break; 6083 case ICE_FWD_TO_Q: 6084 act |= ICE_SINGLE_ACT_TO_Q; 6085 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & 6086 ICE_SINGLE_ACT_Q_INDEX_M; 6087 break; 6088 case ICE_FWD_TO_QGRP: 6089 q_rgn = rinfo->sw_act.qgrp_size > 0 ? 
6090 (u8)ilog2(rinfo->sw_act.qgrp_size) : 0; 6091 act |= ICE_SINGLE_ACT_TO_Q; 6092 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & 6093 ICE_SINGLE_ACT_Q_INDEX_M; 6094 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & 6095 ICE_SINGLE_ACT_Q_REGION_M; 6096 break; 6097 case ICE_DROP_PACKET: 6098 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | 6099 ICE_SINGLE_ACT_VALID_BIT; 6100 break; 6101 default: 6102 status = -EIO; 6103 goto err_ice_add_adv_rule; 6104 } 6105 6106 /* set the rule LOOKUP type based on caller specified 'Rx' 6107 * instead of hardcoding it to be either LOOKUP_TX/RX 6108 * 6109 * for 'Rx' set the source to be the port number 6110 * for 'Tx' set the source to be the source HW VSI number (determined 6111 * by caller) 6112 */ 6113 if (rinfo->rx) { 6114 s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); 6115 s_rule->src = cpu_to_le16(hw->port_info->lport); 6116 } else { 6117 s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); 6118 s_rule->src = cpu_to_le16(rinfo->sw_act.src); 6119 } 6120 6121 s_rule->recipe_id = cpu_to_le16(rid); 6122 s_rule->act = cpu_to_le32(act); 6123 6124 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile); 6125 if (status) 6126 goto err_ice_add_adv_rule; 6127 6128 if (rinfo->tun_type != ICE_NON_TUN && 6129 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) { 6130 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, 6131 s_rule->hdr_data, 6132 profile->offsets); 6133 if (status) 6134 goto err_ice_add_adv_rule; 6135 } 6136 6137 if (rinfo->vlan_type != 0 && ice_is_dvm_ena(hw)) { 6138 status = ice_fill_adv_packet_vlan(rinfo->vlan_type, 6139 s_rule->hdr_data, 6140 profile->offsets); 6141 if (status) 6142 goto err_ice_add_adv_rule; 6143 } 6144 6145 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, 6146 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules, 6147 NULL); 6148 if (status) 6149 goto err_ice_add_adv_rule; 6150 adv_fltr = devm_kzalloc(ice_hw_to_dev(hw), 6151 sizeof(struct 
ice_adv_fltr_mgmt_list_entry), 6152 GFP_KERNEL); 6153 if (!adv_fltr) { 6154 status = -ENOMEM; 6155 goto err_ice_add_adv_rule; 6156 } 6157 6158 adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups, 6159 lkups_cnt * sizeof(*lkups), GFP_KERNEL); 6160 if (!adv_fltr->lkups) { 6161 status = -ENOMEM; 6162 goto err_ice_add_adv_rule; 6163 } 6164 6165 adv_fltr->lkups_cnt = lkups_cnt; 6166 adv_fltr->rule_info = *rinfo; 6167 adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index); 6168 sw = hw->switch_info; 6169 sw->recp_list[rid].adv_rule = true; 6170 rule_head = &sw->recp_list[rid].filt_rules; 6171 6172 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) 6173 adv_fltr->vsi_count = 1; 6174 6175 /* Add rule entry to book keeping list */ 6176 list_add(&adv_fltr->list_entry, rule_head); 6177 if (added_entry) { 6178 added_entry->rid = rid; 6179 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id; 6180 added_entry->vsi_handle = rinfo->sw_act.vsi_handle; 6181 } 6182 err_ice_add_adv_rule: 6183 if (status && adv_fltr) { 6184 devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups); 6185 devm_kfree(ice_hw_to_dev(hw), adv_fltr); 6186 } 6187 6188 kfree(s_rule); 6189 6190 free_pkt_profile: 6191 if (profile->match & ICE_PKT_KMALLOC) { 6192 kfree(profile->offsets); 6193 kfree(profile->pkt); 6194 kfree(profile); 6195 } 6196 6197 return status; 6198 } 6199 6200 /** 6201 * ice_replay_vsi_fltr - Replay filters for requested VSI 6202 * @hw: pointer to the hardware structure 6203 * @vsi_handle: driver VSI handle 6204 * @recp_id: Recipe ID for which rules need to be replayed 6205 * @list_head: list for which filters need to be replayed 6206 * 6207 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle. 6208 * It is required to pass valid VSI handle. 
6209 */ 6210 static int 6211 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, 6212 struct list_head *list_head) 6213 { 6214 struct ice_fltr_mgmt_list_entry *itr; 6215 int status = 0; 6216 u16 hw_vsi_id; 6217 6218 if (list_empty(list_head)) 6219 return status; 6220 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 6221 6222 list_for_each_entry(itr, list_head, list_entry) { 6223 struct ice_fltr_list_entry f_entry; 6224 6225 f_entry.fltr_info = itr->fltr_info; 6226 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN && 6227 itr->fltr_info.vsi_handle == vsi_handle) { 6228 /* update the src in case it is VSI num */ 6229 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) 6230 f_entry.fltr_info.src = hw_vsi_id; 6231 status = ice_add_rule_internal(hw, recp_id, &f_entry); 6232 if (status) 6233 goto end; 6234 continue; 6235 } 6236 if (!itr->vsi_list_info || 6237 !test_bit(vsi_handle, itr->vsi_list_info->vsi_map)) 6238 continue; 6239 /* Clearing it so that the logic can add it back */ 6240 clear_bit(vsi_handle, itr->vsi_list_info->vsi_map); 6241 f_entry.fltr_info.vsi_handle = vsi_handle; 6242 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; 6243 /* update the src in case it is VSI num */ 6244 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) 6245 f_entry.fltr_info.src = hw_vsi_id; 6246 if (recp_id == ICE_SW_LKUP_VLAN) 6247 status = ice_add_vlan_internal(hw, &f_entry); 6248 else 6249 status = ice_add_rule_internal(hw, recp_id, &f_entry); 6250 if (status) 6251 goto end; 6252 } 6253 end: 6254 return status; 6255 } 6256 6257 /** 6258 * ice_adv_rem_update_vsi_list 6259 * @hw: pointer to the hardware structure 6260 * @vsi_handle: VSI handle of the VSI to remove 6261 * @fm_list: filter management entry for which the VSI list management needs to 6262 * be done 6263 */ 6264 static int 6265 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, 6266 struct ice_adv_fltr_mgmt_list_entry *fm_list) 6267 { 6268 struct ice_vsi_list_map_info *vsi_list_info; 6269 enum 
ice_sw_lkup_type lkup_type; 6270 u16 vsi_list_id; 6271 int status; 6272 6273 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST || 6274 fm_list->vsi_count == 0) 6275 return -EINVAL; 6276 6277 /* A rule with the VSI being removed does not exist */ 6278 if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) 6279 return -ENOENT; 6280 6281 lkup_type = ICE_SW_LKUP_LAST; 6282 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id; 6283 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true, 6284 ice_aqc_opc_update_sw_rules, 6285 lkup_type); 6286 if (status) 6287 return status; 6288 6289 fm_list->vsi_count--; 6290 clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map); 6291 vsi_list_info = fm_list->vsi_list_info; 6292 if (fm_list->vsi_count == 1) { 6293 struct ice_fltr_info tmp_fltr; 6294 u16 rem_vsi_handle; 6295 6296 rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, 6297 ICE_MAX_VSI); 6298 if (!ice_is_vsi_valid(hw, rem_vsi_handle)) 6299 return -EIO; 6300 6301 /* Make sure VSI list is empty before removing it below */ 6302 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, 6303 vsi_list_id, true, 6304 ice_aqc_opc_update_sw_rules, 6305 lkup_type); 6306 if (status) 6307 return status; 6308 6309 memset(&tmp_fltr, 0, sizeof(tmp_fltr)); 6310 tmp_fltr.flag = fm_list->rule_info.sw_act.flag; 6311 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id; 6312 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI; 6313 tmp_fltr.fltr_act = ICE_FWD_TO_VSI; 6314 tmp_fltr.fwd_id.hw_vsi_id = 6315 ice_get_hw_vsi_num(hw, rem_vsi_handle); 6316 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id = 6317 ice_get_hw_vsi_num(hw, rem_vsi_handle); 6318 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle; 6319 6320 /* Update the previous switch rule of "MAC forward to VSI" to 6321 * "MAC fwd to VSI list" 6322 */ 6323 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); 6324 if (status) { 6325 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI 
on HW VSI %d, error %d\n", 6326 tmp_fltr.fwd_id.hw_vsi_id, status); 6327 return status; 6328 } 6329 fm_list->vsi_list_info->ref_cnt--; 6330 6331 /* Remove the VSI list since it is no longer used */ 6332 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); 6333 if (status) { 6334 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n", 6335 vsi_list_id, status); 6336 return status; 6337 } 6338 6339 list_del(&vsi_list_info->list_entry); 6340 devm_kfree(ice_hw_to_dev(hw), vsi_list_info); 6341 fm_list->vsi_list_info = NULL; 6342 } 6343 6344 return status; 6345 } 6346 6347 /** 6348 * ice_rem_adv_rule - removes existing advanced switch rule 6349 * @hw: pointer to the hardware structure 6350 * @lkups: information on the words that needs to be looked up. All words 6351 * together makes one recipe 6352 * @lkups_cnt: num of entries in the lkups array 6353 * @rinfo: Its the pointer to the rule information for the rule 6354 * 6355 * This function can be used to remove 1 rule at a time. The lkups is 6356 * used to describe all the words that forms the "lookup" portion of the 6357 * rule. These words can span multiple protocols. Callers to this function 6358 * need to pass in a list of protocol headers with lookup information along 6359 * and mask that determines which words are valid from the given protocol 6360 * header. rinfo describes other information related to this rule such as 6361 * forwarding IDs, priority of this rule, etc. 
 */
static int
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	bool remove_rule = false;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 i, rid, vsi_handle;
	int status = 0;

	/* Re-derive the protocol/offset words from the caller's lookups so
	 * the recipe that was used to program this rule can be found again.
	 */
	memset(&lkup_exts, 0, sizeof(lkup_exts));
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
			return -EIO;

		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
		if (!count)
			return -EIO;
	}

	/* Create any special protocol/offset pairs, such as looking at tunnel
	 * bits by extracting metadata
	 */
	status = ice_add_special_words(rinfo, &lkup_exts, ice_is_dvm_ena(hw));
	if (status)
		return status;

	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return -EINVAL;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	/* NOTE(review): this lookup walks the filter list without rule_lock
	 * held; presumably callers serialize rule removal — confirm.
	 */
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	if (!list_elem)
		return 0;
	mutex_lock(rule_lock);
	/* A rule that does not forward to a VSI list is deleted outright.
	 * A FWD_TO_VSI_LIST rule first has this VSI dropped from its list;
	 * the hardware rule itself is removed only once no VSI remains.
	 */
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
		remove_rule = true;
	} else if (list_elem->vsi_count > 1) {
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	} else {
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status) {
			mutex_unlock(rule_lock);
			return status;
		}
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}
	mutex_unlock(rule_lock);
	if (remove_rule) {
		struct ice_sw_rule_lkup_rx_tx *s_rule;
		u16 rule_buf_sz;

		/* Removal only needs the rule index: no action bits and no
		 * dummy packet header are sent with the admin-queue request.
		 */
		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
		if (!s_rule)
			return -ENOMEM;
		s_rule->act = 0;
		s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
		s_rule->hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 rule_buf_sz, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		/* Drop the bookkeeping entry on success, and also when FW
		 * reports the rule is already gone (-ENOENT).
		 */
		if (!status || status == -ENOENT) {
			struct ice_switch_info *sw = hw->switch_info;

			mutex_lock(rule_lock);
			list_del(&list_elem->list_entry);
			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
			devm_kfree(ice_hw_to_dev(hw), list_elem);
			mutex_unlock(rule_lock);
			/* last advanced rule of this recipe was removed */
			if (list_empty(&sw->recp_list[rid].filt_rules))
				sw->recp_list[rid].adv_rule = false;
		}
		kfree(s_rule);
	}
	return status;
}

/**
 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
 * @hw: pointer to the hardware structure
 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
 *
 * This function is used to remove 1 rule at a time. The removal is based on
 * the remove_entry parameter.
This function will remove rule for a given
 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
 */
int
ice_rem_adv_rule_by_id(struct ice_hw *hw,
		       struct ice_rule_query_data *remove_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *fltr;
	struct ice_adv_rule_info rinfo;
	struct ice_switch_info *sw;
	struct list_head *rules;

	sw = hw->switch_info;
	/* a recipe that was never created cannot hold any rules */
	if (!sw->recp_list[remove_entry->rid].recp_created)
		return -EINVAL;

	rules = &sw->recp_list[remove_entry->rid].filt_rules;
	list_for_each_entry(fltr, rules, list_entry) {
		if (fltr->rule_info.fltr_rule_id != remove_entry->rule_id)
			continue;

		/* found it: retarget the action at the caller's VSI and let
		 * ice_rem_adv_rule() perform the actual teardown
		 */
		rinfo = fltr->rule_info;
		rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
		return ice_rem_adv_rule(hw, fltr->lkups, fltr->lkups_cnt,
					&rinfo);
	}

	/* either list is empty or unable to find rule */
	return -ENOENT;
}

/**
 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
 * given VSI handle
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
 *
 * This function is used to remove all the rules for a given VSI and as soon
 * as removing a rule fails, it will return immediately with the error code,
 * else it will return success.
6495 */ 6496 int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle) 6497 { 6498 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry; 6499 struct ice_vsi_list_map_info *map_info; 6500 struct ice_adv_rule_info rinfo; 6501 struct list_head *list_head; 6502 struct ice_switch_info *sw; 6503 int status; 6504 u8 rid; 6505 6506 sw = hw->switch_info; 6507 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) { 6508 if (!sw->recp_list[rid].recp_created) 6509 continue; 6510 if (!sw->recp_list[rid].adv_rule) 6511 continue; 6512 6513 list_head = &sw->recp_list[rid].filt_rules; 6514 list_for_each_entry_safe(list_itr, tmp_entry, list_head, 6515 list_entry) { 6516 rinfo = list_itr->rule_info; 6517 6518 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) { 6519 map_info = list_itr->vsi_list_info; 6520 if (!map_info) 6521 continue; 6522 6523 if (!test_bit(vsi_handle, map_info->vsi_map)) 6524 continue; 6525 } else if (rinfo.sw_act.vsi_handle != vsi_handle) { 6526 continue; 6527 } 6528 6529 rinfo.sw_act.vsi_handle = vsi_handle; 6530 status = ice_rem_adv_rule(hw, list_itr->lkups, 6531 list_itr->lkups_cnt, &rinfo); 6532 if (status) 6533 return status; 6534 } 6535 } 6536 return 0; 6537 } 6538 6539 /** 6540 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI 6541 * @hw: pointer to the hardware structure 6542 * @vsi_handle: driver VSI handle 6543 * @list_head: list for which filters need to be replayed 6544 * 6545 * Replay the advanced rule for the given VSI. 
6546 */ 6547 static int 6548 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle, 6549 struct list_head *list_head) 6550 { 6551 struct ice_rule_query_data added_entry = { 0 }; 6552 struct ice_adv_fltr_mgmt_list_entry *adv_fltr; 6553 int status = 0; 6554 6555 if (list_empty(list_head)) 6556 return status; 6557 list_for_each_entry(adv_fltr, list_head, list_entry) { 6558 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info; 6559 u16 lk_cnt = adv_fltr->lkups_cnt; 6560 6561 if (vsi_handle != rinfo->sw_act.vsi_handle) 6562 continue; 6563 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo, 6564 &added_entry); 6565 if (status) 6566 break; 6567 } 6568 return status; 6569 } 6570 6571 /** 6572 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists 6573 * @hw: pointer to the hardware structure 6574 * @vsi_handle: driver VSI handle 6575 * 6576 * Replays filters for requested VSI via vsi_handle. 6577 */ 6578 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) 6579 { 6580 struct ice_switch_info *sw = hw->switch_info; 6581 int status; 6582 u8 i; 6583 6584 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { 6585 struct list_head *head; 6586 6587 head = &sw->recp_list[i].filt_replay_rules; 6588 if (!sw->recp_list[i].adv_rule) 6589 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head); 6590 else 6591 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head); 6592 if (status) 6593 return status; 6594 } 6595 return status; 6596 } 6597 6598 /** 6599 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules 6600 * @hw: pointer to the HW struct 6601 * 6602 * Deletes the filter replay rules. 
6603 */ 6604 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw) 6605 { 6606 struct ice_switch_info *sw = hw->switch_info; 6607 u8 i; 6608 6609 if (!sw) 6610 return; 6611 6612 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { 6613 if (!list_empty(&sw->recp_list[i].filt_replay_rules)) { 6614 struct list_head *l_head; 6615 6616 l_head = &sw->recp_list[i].filt_replay_rules; 6617 if (!sw->recp_list[i].adv_rule) 6618 ice_rem_sw_rule_info(hw, l_head); 6619 else 6620 ice_rem_adv_rule_info(hw, l_head); 6621 } 6622 } 6623 } 6624