// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_lib.h"
#include "ice_switch.h"

#define ICE_ETH_DA_OFFSET		0
#define ICE_ETH_ETHTYPE_OFFSET		12
#define ICE_ETH_VLAN_TCI_OFFSET		14
#define ICE_MAX_VLAN_ID			0xFFF
#define ICE_IPV6_ETHER_ID		0x86DD

/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *	In case of VLAN filter first two bytes defines ether type (0x8100)
 *	and remaining two bytes are placeholder for programming a given VLAN ID
 *	In case of Ether type filter it is treated as header without VLAN tag
 *	and byte 12 and 13 is used to program a given Ether type instead
 */
#define DUMMY_ETH_HDR_LEN 16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

/* One entry per protocol header present in a dummy packet; the offset is
 * the byte position of that header within the dummy packet buffer.
 */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};

/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_TCP_IL,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + IPv4 + TCP */
static const u8 dummy_gre_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};

/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_UDP_ILOS,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + IPv4 + UDP */
static const u8 dummy_gre_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,
};

/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * inner MAC + IPv4 + TCP dummy packet
 */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_TCP_IL,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + IPv4 + TCP */
static const u8 dummy_udp_tun_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34 */
	0x00, 0x46, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};

/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * inner MAC + IPv4 + UDP dummy packet
 */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_UDP_ILOS,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + IPv4 + UDP */
static const u8 dummy_udp_tun_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x4e,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34 */
	0x00, 0x3a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,
};

/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP */
static const u8 dummy_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_UDP_ILOS,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv4:UDP dummy packet */
static const u8 dummy_vlan_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x08, 0x00,		/* ICE_ETYPE_OL 16 */

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 38 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_TCP_IL,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + TCP */
static const u8 dummy_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_TCP_IL,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv4:TCP dummy packet */
static const u8 dummy_vlan_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x08, 0x00,		/* ICE_ETYPE_OL 16 */

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 38 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + IPv6 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_TCP_IL,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv6 + TCP */
static const u8 dummy_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00,	/* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* C-tag (802.1Q): IPv6 + TCP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_tcp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_TCP_IL,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + TCP dummy packet */
static const u8 dummy_vlan_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 18 */
	0x00, 0x14, 0x06, 0x00,	/* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 58 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* IPv6 + UDP */
static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* IPv6 + UDP dummy packet */
static const u8 dummy_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00,	/* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* C-tag (802.1Q): IPv6 + UDP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_udp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_UDP_ILOS,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + UDP dummy packet */
static const u8 dummy_vlan_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 18 */
	0x00, 0x08, 0x11, 0x00,	/* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 58 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
	 (DUMMY_ETH_HDR_LEN * \
	  sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))

/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
		      ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
		      ICE_MAX_NUM_RECIPES);

/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
530 */ 531 int ice_init_def_sw_recp(struct ice_hw *hw) 532 { 533 struct ice_sw_recipe *recps; 534 u8 i; 535 536 recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES, 537 sizeof(*recps), GFP_KERNEL); 538 if (!recps) 539 return -ENOMEM; 540 541 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { 542 recps[i].root_rid = i; 543 INIT_LIST_HEAD(&recps[i].filt_rules); 544 INIT_LIST_HEAD(&recps[i].filt_replay_rules); 545 INIT_LIST_HEAD(&recps[i].rg_list); 546 mutex_init(&recps[i].filt_rule_lock); 547 } 548 549 hw->switch_info->recp_list = recps; 550 551 return 0; 552 } 553 554 /** 555 * ice_aq_get_sw_cfg - get switch configuration 556 * @hw: pointer to the hardware structure 557 * @buf: pointer to the result buffer 558 * @buf_size: length of the buffer available for response 559 * @req_desc: pointer to requested descriptor 560 * @num_elems: pointer to number of elements 561 * @cd: pointer to command details structure or NULL 562 * 563 * Get switch configuration (0x0200) to be placed in buf. 564 * This admin command returns information such as initial VSI/port number 565 * and switch ID it belongs to. 566 * 567 * NOTE: *req_desc is both an input/output parameter. 568 * The caller of this function first calls this function with *request_desc set 569 * to 0. If the response from f/w has *req_desc set to 0, all the switch 570 * configuration information has been returned; if non-zero (meaning not all 571 * the information was returned), the caller should call this function again 572 * with *req_desc set to the previous value returned by f/w to get the 573 * next block of switch configuration information. 574 * 575 * *num_elems is output only parameter. This reflects the number of elements 576 * in response buffer. The caller of this function to use *num_elems while 577 * parsing the response buffer. 
578 */ 579 static int 580 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf, 581 u16 buf_size, u16 *req_desc, u16 *num_elems, 582 struct ice_sq_cd *cd) 583 { 584 struct ice_aqc_get_sw_cfg *cmd; 585 struct ice_aq_desc desc; 586 int status; 587 588 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg); 589 cmd = &desc.params.get_sw_conf; 590 cmd->element = cpu_to_le16(*req_desc); 591 592 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 593 if (!status) { 594 *req_desc = le16_to_cpu(cmd->element); 595 *num_elems = le16_to_cpu(cmd->num_elems); 596 } 597 598 return status; 599 } 600 601 /** 602 * ice_aq_add_vsi 603 * @hw: pointer to the HW struct 604 * @vsi_ctx: pointer to a VSI context struct 605 * @cd: pointer to command details structure or NULL 606 * 607 * Add a VSI context to the hardware (0x0210) 608 */ 609 static int 610 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, 611 struct ice_sq_cd *cd) 612 { 613 struct ice_aqc_add_update_free_vsi_resp *res; 614 struct ice_aqc_add_get_update_free_vsi *cmd; 615 struct ice_aq_desc desc; 616 int status; 617 618 cmd = &desc.params.vsi_cmd; 619 res = &desc.params.add_update_free_vsi_res; 620 621 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi); 622 623 if (!vsi_ctx->alloc_from_pool) 624 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | 625 ICE_AQ_VSI_IS_VALID); 626 cmd->vf_id = vsi_ctx->vf_num; 627 628 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 629 630 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 631 632 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info, 633 sizeof(vsi_ctx->info), cd); 634 635 if (!status) { 636 vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M; 637 vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used); 638 vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free); 639 } 640 641 return status; 642 } 643 644 /** 645 * ice_aq_free_vsi 646 * @hw: pointer to the HW struct 647 * @vsi_ctx: pointer to a VSI context struct 648 * @keep_vsi_alloc: 
keep VSI allocation as part of this PF's resources 649 * @cd: pointer to command details structure or NULL 650 * 651 * Free VSI context info from hardware (0x0213) 652 */ 653 static int 654 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, 655 bool keep_vsi_alloc, struct ice_sq_cd *cd) 656 { 657 struct ice_aqc_add_update_free_vsi_resp *resp; 658 struct ice_aqc_add_get_update_free_vsi *cmd; 659 struct ice_aq_desc desc; 660 int status; 661 662 cmd = &desc.params.vsi_cmd; 663 resp = &desc.params.add_update_free_vsi_res; 664 665 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi); 666 667 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); 668 if (keep_vsi_alloc) 669 cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC); 670 671 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 672 if (!status) { 673 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used); 674 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 675 } 676 677 return status; 678 } 679 680 /** 681 * ice_aq_update_vsi 682 * @hw: pointer to the HW struct 683 * @vsi_ctx: pointer to a VSI context struct 684 * @cd: pointer to command details structure or NULL 685 * 686 * Update VSI context in the hardware (0x0211) 687 */ 688 static int 689 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, 690 struct ice_sq_cd *cd) 691 { 692 struct ice_aqc_add_update_free_vsi_resp *resp; 693 struct ice_aqc_add_get_update_free_vsi *cmd; 694 struct ice_aq_desc desc; 695 int status; 696 697 cmd = &desc.params.vsi_cmd; 698 resp = &desc.params.add_update_free_vsi_res; 699 700 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi); 701 702 cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); 703 704 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 705 706 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info, 707 sizeof(vsi_ctx->info), cd); 708 709 if (!status) { 710 vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used); 711 vsi_ctx->vsis_unallocated = 
le16_to_cpu(resp->vsi_free); 712 } 713 714 return status; 715 } 716 717 /** 718 * ice_is_vsi_valid - check whether the VSI is valid or not 719 * @hw: pointer to the HW struct 720 * @vsi_handle: VSI handle 721 * 722 * check whether the VSI is valid or not 723 */ 724 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle) 725 { 726 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle]; 727 } 728 729 /** 730 * ice_get_hw_vsi_num - return the HW VSI number 731 * @hw: pointer to the HW struct 732 * @vsi_handle: VSI handle 733 * 734 * return the HW VSI number 735 * Caution: call this function only if VSI is valid (ice_is_vsi_valid) 736 */ 737 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle) 738 { 739 return hw->vsi_ctx[vsi_handle]->vsi_num; 740 } 741 742 /** 743 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle 744 * @hw: pointer to the HW struct 745 * @vsi_handle: VSI handle 746 * 747 * return the VSI context entry for a given VSI handle 748 */ 749 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) 750 { 751 return (vsi_handle >= ICE_MAX_VSI) ? 
NULL : hw->vsi_ctx[vsi_handle]; 752 } 753 754 /** 755 * ice_save_vsi_ctx - save the VSI context for a given VSI handle 756 * @hw: pointer to the HW struct 757 * @vsi_handle: VSI handle 758 * @vsi: VSI context pointer 759 * 760 * save the VSI context entry for a given VSI handle 761 */ 762 static void 763 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi) 764 { 765 hw->vsi_ctx[vsi_handle] = vsi; 766 } 767 768 /** 769 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs 770 * @hw: pointer to the HW struct 771 * @vsi_handle: VSI handle 772 */ 773 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle) 774 { 775 struct ice_vsi_ctx *vsi; 776 u8 i; 777 778 vsi = ice_get_vsi_ctx(hw, vsi_handle); 779 if (!vsi) 780 return; 781 ice_for_each_traffic_class(i) { 782 if (vsi->lan_q_ctx[i]) { 783 devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]); 784 vsi->lan_q_ctx[i] = NULL; 785 } 786 if (vsi->rdma_q_ctx[i]) { 787 devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]); 788 vsi->rdma_q_ctx[i] = NULL; 789 } 790 } 791 } 792 793 /** 794 * ice_clear_vsi_ctx - clear the VSI context entry 795 * @hw: pointer to the HW struct 796 * @vsi_handle: VSI handle 797 * 798 * clear the VSI context entry 799 */ 800 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) 801 { 802 struct ice_vsi_ctx *vsi; 803 804 vsi = ice_get_vsi_ctx(hw, vsi_handle); 805 if (vsi) { 806 ice_clear_vsi_q_ctx(hw, vsi_handle); 807 devm_kfree(ice_hw_to_dev(hw), vsi); 808 hw->vsi_ctx[vsi_handle] = NULL; 809 } 810 } 811 812 /** 813 * ice_clear_all_vsi_ctx - clear all the VSI context entries 814 * @hw: pointer to the HW struct 815 */ 816 void ice_clear_all_vsi_ctx(struct ice_hw *hw) 817 { 818 u16 i; 819 820 for (i = 0; i < ICE_MAX_VSI; i++) 821 ice_clear_vsi_ctx(hw, i); 822 } 823 824 /** 825 * ice_add_vsi - add VSI context to the hardware and VSI handle list 826 * @hw: pointer to the HW struct 827 * @vsi_handle: unique VSI handle provided by drivers 828 * @vsi_ctx: pointer to 
a VSI context struct 829 * @cd: pointer to command details structure or NULL 830 * 831 * Add a VSI context to the hardware also add it into the VSI handle list. 832 * If this function gets called after reset for existing VSIs then update 833 * with the new HW VSI number in the corresponding VSI handle list entry. 834 */ 835 int 836 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, 837 struct ice_sq_cd *cd) 838 { 839 struct ice_vsi_ctx *tmp_vsi_ctx; 840 int status; 841 842 if (vsi_handle >= ICE_MAX_VSI) 843 return -EINVAL; 844 status = ice_aq_add_vsi(hw, vsi_ctx, cd); 845 if (status) 846 return status; 847 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); 848 if (!tmp_vsi_ctx) { 849 /* Create a new VSI context */ 850 tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw), 851 sizeof(*tmp_vsi_ctx), GFP_KERNEL); 852 if (!tmp_vsi_ctx) { 853 ice_aq_free_vsi(hw, vsi_ctx, false, cd); 854 return -ENOMEM; 855 } 856 *tmp_vsi_ctx = *vsi_ctx; 857 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx); 858 } else { 859 /* update with new HW VSI num */ 860 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num; 861 } 862 863 return 0; 864 } 865 866 /** 867 * ice_free_vsi- free VSI context from hardware and VSI handle list 868 * @hw: pointer to the HW struct 869 * @vsi_handle: unique VSI handle 870 * @vsi_ctx: pointer to a VSI context struct 871 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources 872 * @cd: pointer to command details structure or NULL 873 * 874 * Free VSI context info from hardware as well as from VSI handle list 875 */ 876 int 877 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, 878 bool keep_vsi_alloc, struct ice_sq_cd *cd) 879 { 880 int status; 881 882 if (!ice_is_vsi_valid(hw, vsi_handle)) 883 return -EINVAL; 884 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); 885 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd); 886 if (!status) 887 ice_clear_vsi_ctx(hw, vsi_handle); 888 return status; 889 } 890 891 /** 892 
* ice_update_vsi 893 * @hw: pointer to the HW struct 894 * @vsi_handle: unique VSI handle 895 * @vsi_ctx: pointer to a VSI context struct 896 * @cd: pointer to command details structure or NULL 897 * 898 * Update VSI context in the hardware 899 */ 900 int 901 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, 902 struct ice_sq_cd *cd) 903 { 904 if (!ice_is_vsi_valid(hw, vsi_handle)) 905 return -EINVAL; 906 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); 907 return ice_aq_update_vsi(hw, vsi_ctx, cd); 908 } 909 910 /** 911 * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI 912 * @hw: pointer to HW struct 913 * @vsi_handle: VSI SW index 914 * @enable: boolean for enable/disable 915 */ 916 int 917 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable) 918 { 919 struct ice_vsi_ctx *ctx; 920 921 ctx = ice_get_vsi_ctx(hw, vsi_handle); 922 if (!ctx) 923 return -EIO; 924 925 if (enable) 926 ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; 927 else 928 ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; 929 930 return ice_update_vsi(hw, vsi_handle, ctx, NULL); 931 } 932 933 /** 934 * ice_aq_alloc_free_vsi_list 935 * @hw: pointer to the HW struct 936 * @vsi_list_id: VSI list ID returned or used for lookup 937 * @lkup_type: switch rule filter lookup type 938 * @opc: switch rules population command type - pass in the command opcode 939 * 940 * allocates or free a VSI list resource 941 */ 942 static int 943 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, 944 enum ice_sw_lkup_type lkup_type, 945 enum ice_adminq_opc opc) 946 { 947 struct ice_aqc_alloc_free_res_elem *sw_buf; 948 struct ice_aqc_res_elem *vsi_ele; 949 u16 buf_len; 950 int status; 951 952 buf_len = struct_size(sw_buf, elem, 1); 953 sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); 954 if (!sw_buf) 955 return -ENOMEM; 956 sw_buf->num_elems = cpu_to_le16(1); 957 958 if (lkup_type == ICE_SW_LKUP_MAC || 959 lkup_type == 
ICE_SW_LKUP_MAC_VLAN || 960 lkup_type == ICE_SW_LKUP_ETHERTYPE || 961 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || 962 lkup_type == ICE_SW_LKUP_PROMISC || 963 lkup_type == ICE_SW_LKUP_PROMISC_VLAN) { 964 sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP); 965 } else if (lkup_type == ICE_SW_LKUP_VLAN) { 966 sw_buf->res_type = 967 cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE); 968 } else { 969 status = -EINVAL; 970 goto ice_aq_alloc_free_vsi_list_exit; 971 } 972 973 if (opc == ice_aqc_opc_free_res) 974 sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id); 975 976 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL); 977 if (status) 978 goto ice_aq_alloc_free_vsi_list_exit; 979 980 if (opc == ice_aqc_opc_alloc_res) { 981 vsi_ele = &sw_buf->elem[0]; 982 *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp); 983 } 984 985 ice_aq_alloc_free_vsi_list_exit: 986 devm_kfree(ice_hw_to_dev(hw), sw_buf); 987 return status; 988 } 989 990 /** 991 * ice_aq_sw_rules - add/update/remove switch rules 992 * @hw: pointer to the HW struct 993 * @rule_list: pointer to switch rule population list 994 * @rule_list_sz: total size of the rule list in bytes 995 * @num_rules: number of switch rules in the rule_list 996 * @opc: switch rules population command type - pass in the command opcode 997 * @cd: pointer to command details structure or NULL 998 * 999 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware 1000 */ 1001 int 1002 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, 1003 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd) 1004 { 1005 struct ice_aq_desc desc; 1006 int status; 1007 1008 if (opc != ice_aqc_opc_add_sw_rules && 1009 opc != ice_aqc_opc_update_sw_rules && 1010 opc != ice_aqc_opc_remove_sw_rules) 1011 return -EINVAL; 1012 1013 ice_fill_dflt_direct_cmd_desc(&desc, opc); 1014 1015 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1016 desc.params.sw_rules.num_rules_fltr_entry_index = 1017 cpu_to_le16(num_rules); 
1018 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd); 1019 if (opc != ice_aqc_opc_add_sw_rules && 1020 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) 1021 status = -ENOENT; 1022 1023 return status; 1024 } 1025 1026 /** 1027 * ice_aq_add_recipe - add switch recipe 1028 * @hw: pointer to the HW struct 1029 * @s_recipe_list: pointer to switch rule population list 1030 * @num_recipes: number of switch recipes in the list 1031 * @cd: pointer to command details structure or NULL 1032 * 1033 * Add(0x0290) 1034 */ 1035 static int 1036 ice_aq_add_recipe(struct ice_hw *hw, 1037 struct ice_aqc_recipe_data_elem *s_recipe_list, 1038 u16 num_recipes, struct ice_sq_cd *cd) 1039 { 1040 struct ice_aqc_add_get_recipe *cmd; 1041 struct ice_aq_desc desc; 1042 u16 buf_size; 1043 1044 cmd = &desc.params.add_get_recipe; 1045 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe); 1046 1047 cmd->num_sub_recipes = cpu_to_le16(num_recipes); 1048 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1049 1050 buf_size = num_recipes * sizeof(*s_recipe_list); 1051 1052 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd); 1053 } 1054 1055 /** 1056 * ice_aq_get_recipe - get switch recipe 1057 * @hw: pointer to the HW struct 1058 * @s_recipe_list: pointer to switch rule population list 1059 * @num_recipes: pointer to the number of recipes (input and output) 1060 * @recipe_root: root recipe number of recipe(s) to retrieve 1061 * @cd: pointer to command details structure or NULL 1062 * 1063 * Get(0x0292) 1064 * 1065 * On input, *num_recipes should equal the number of entries in s_recipe_list. 1066 * On output, *num_recipes will equal the number of entries returned in 1067 * s_recipe_list. 1068 * 1069 * The caller must supply enough space in s_recipe_list to hold all possible 1070 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES. 
 */
static int
ice_aq_get_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	u16 buf_size;
	int status;

	/* caller must pass a full-sized buffer (see kernel-doc above) */
	if (*num_recipes != ICE_MAX_NUM_RECIPES)
		return -EINVAL;

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);

	cmd->return_index = cpu_to_le16(recipe_root);
	cmd->num_sub_recipes = 0;

	buf_size = *num_recipes * sizeof(*s_recipe_list);

	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
	/* firmware reports how many entries it actually returned */
	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);

	return status;
}

/**
 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
 * @hw: pointer to the HW struct
 * @params: parameters used to update the default recipe
 *
 * This function only supports updating default recipes and it only supports
 * updating a single recipe based on the lkup_idx at a time.
 *
 * This is done as a read-modify-write operation. First, get the current recipe
 * contents based on the recipe's ID. Then modify the field vector index and
 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
 * the pre-existing recipe with the modifications.
 */
int
ice_update_recipe_lkup_idx(struct ice_hw *hw,
			   struct ice_update_recipe_lkup_idx_params *params)
{
	struct ice_aqc_recipe_data_elem *rcp_list;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	int status;

	rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
	if (!rcp_list)
		return -ENOMEM;

	/* read current recipe list from firmware */
	rcp_list->recipe_indx = params->rid;
	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
	if (status) {
		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
			  params->rid, status);
		goto error_out;
	}

	/* only modify existing recipe's lkup_idx and mask if valid, while
	 * leaving all other fields the same, then update the recipe firmware
	 */
	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
	if (params->mask_valid)
		rcp_list->content.mask[params->lkup_idx] =
			cpu_to_le16(params->mask);

	if (params->ignore_valid)
		rcp_list->content.lkup_indx[params->lkup_idx] |=
			ICE_AQ_RECIPE_LKUP_IGNORE;

	/* write back: the add-recipe AQ also updates existing recipes */
	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
			  params->rid, params->lkup_idx, params->fv_idx,
			  params->mask, params->mask_valid ? "true" : "false",
			  status);

error_out:
	kfree(rcp_list);
	return status;
}

/**
 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 * Recipe to profile association (0x0291)
 */
static int
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
{
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);
	/* Set the recipe ID bit in the bitmask to let the device know which
	 * profile we are associating the recipe to
	 */
	memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 * Associate profile ID with given recipe (0x0293)
 */
static int
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
{
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* only copy the association bitmap out on success */
	if (!status)
		memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));

	return status;
}

/**
 * ice_alloc_recipe - add recipe resource
 * @hw: pointer to the hardware structure
 * @rid: recipe ID returned as response to AQ call
 */
static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	u16 buf_len;
	int status;

	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = kzalloc(buf_len, GFP_KERNEL);
	if (!sw_buf)
		return -ENOMEM;

	sw_buf->num_elems = cpu_to_le16(1);
	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
					ICE_AQC_RES_TYPE_S) |
				       ICE_AQC_RES_TYPE_FLAG_SHARED);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	/* on success firmware returns the allocated recipe ID */
	if (!status)
		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
	kfree(sw_buf);

	return status;
}

/**
 * ice_get_recp_to_prof_map - updates recipe to profile mapping
 * @hw: pointer to hardware structure
 *
 * This function is used to populate recipe_to_profile matrix where index to
 * this array is the recipe ID and the element is the mapping of which profiles
 * is this recipe mapped to.
 */
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
{
	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
	u16 i;

	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
		u16 j;

		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
		/* profiles with no association are simply skipped */
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
			continue;
		bitmap_copy(profile_to_recipe[i], r_bitmap,
			    ICE_MAX_NUM_RECIPES);
		/* also maintain the reverse mapping: recipe -> profiles */
		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
			set_bit(i, recipe_to_profile[j]);
	}
}

/**
 * ice_collect_result_idx - copy result index values
 * @buf: buffer that contains the result index
 * @recp: the recipe struct to copy data into
 */
static void
ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
		       struct ice_sw_recipe *recp)
{
	/* only record the index when the result-enable bit is set */
	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
			recp->res_idxs);
}

/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 */
static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
{
	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	u8 fv_word_idx = 0;
	u16 sub_recps;
	int status;

	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */
	if (status)
		goto err_unroll;

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;
	}

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;
		bool is_root;
		u16 off = 0;

		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
					GFP_KERNEL);
		if (!rg_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
				result_bm);

		/* get the first profile that is associated with rid */
		prof = find_first_bit(recipe_to_profile[idx],
				      ICE_MAX_NUM_PROFILES);
		/* lkup_indx[0] is skipped: words start at index 1 in the
		 * recipe content
		 */
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				le16_to_cpu(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)
				continue;

			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];
			fv_word_idx++;
		}
		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		list_add(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
		} else {
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
		}

		if (!is_root)
			continue;

		/* Only do the following for root recipes entries */
		memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
		       sizeof(recps[idx].r_bitmap));
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
	}

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
					   GFP_KERNEL);
	if (!recps[rid].root_buf) {
		status = -ENOMEM;
		goto err_unroll;
	}

	/* Copy result indexes */
	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;

err_unroll:
	kfree(tmp);
	return status;
}

/* ice_init_port_info - Initialize port_info with switch configuration data
 * @pi: pointer to port_info
 * @vsi_port_num: VSI number or port number
 * @type: Type of switch element (port or VSI)
 * @swid: switch ID of the switch the element is attached to
 * @pf_vf_num: PF or VF number
 * @is_vf: true if the element is a VF, false otherwise
 */
static void
ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
		   u16 swid, u16 pf_vf_num, bool is_vf)
{
	switch (type) {
	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
		pi->sw_id = swid;
		pi->pf_vf_num = pf_vf_num;
		pi->is_vf = is_vf;
		/* no default Tx/Rx VSI until one is explicitly configured */
		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
		break;
	default:
		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
		break;
	}
}

/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 */
int ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
	u16 req_desc = 0;
	u16 num_elems;
	int status;
	u16 i;

	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
			    GFP_KERNEL);

	if (!rbuf)
		return -ENOMEM;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information.
The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		struct ice_aqc_get_sw_cfg_resp_elem *ele;

		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 res_type;

			/* vsi_port_num and res_type share one LE16 field */
			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = le16_to_cpu(ele->swid);

			if (le16_to_cpu(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);

			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
				/* FW VSI is not needed. Just continue. */
				continue;
			}

			ice_init_port_info(hw->port_info, vsi_port_num,
					   res_type, swid, pf_vf_num, is_vf);
		}
	} while (req_desc && !status);

	devm_kfree(ice_hw_to_dev(hw), rbuf);
	return status;
}

/**
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @fi: filter info structure to fill/update
 *
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
 */
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
{
	fi->lb_en = false;
	fi->lan_en = false;
	/* only Tx-direction forwarding rules are considered here */
	if ((fi->flag & ICE_FLTR_TX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     fi->fltr_act == ICE_FWD_TO_Q ||
	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
		/* Setting LB for prune actions will result in replicated
		 * packets to the internal switch that will be dropped.
		 */
		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
			fi->lb_en = true;

		/* Set lan_en to TRUE if
		 * 1. The switch is a VEB AND
		 * 2
		 * 2.1 The lookup is a directional lookup like ethertype,
		 *     promiscuous, ethertype-MAC, promiscuous-VLAN
		 *     and default-port OR
		 * 2.2 The lookup is VLAN, OR
		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
		 *
		 * OR
		 *
		 * The switch is a VEPA.
		 *
		 * In all other cases, the LAN enable has to be set to false.
		 */
		if (hw->evb_veb) {
			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
				fi->lan_en = true;
		} else {
			/* VEPA mode: always enable LAN direction */
			fi->lan_en = true;
		}
	}
}

/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	/* start past the valid range so "no VLAN" is the default */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	u16 vlan_tpid = ETH_P_8021Q;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	/* a remove rule only needs the rule index, no header or action */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is encoded as log2 of the group size */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->l_data.vlan.tpid_valid)
			vlan_tpid = f_info->l_data.vlan.tpid;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		fallthrough;
	case ICE_SW_LKUP_ETHERTYPE:
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		fallthrough;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* vlan_id <= ICE_MAX_VLAN_ID means a VLAN was selected above */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(vlan_tpid);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}

/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3.
GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	u16 lg_act_size;
	u16 rules_size;
	int status;
	u32 act;
	u16 id;

	/* markers are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return -EINVAL;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return -ENOMEM;

	/* second rule lives immediately after the large action */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* record the marker association only on success */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}

/**
 * ice_create_vsi_list_map
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 *
 * Helper function to create a new entry of VSI list ID to VSI mapping
 * using the given VSI list ID
 *
 * Returns the new map entry, or NULL on allocation failure.
 */
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			u16 vsi_list_id)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_map;
	int i;

	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
	if (!v_map)
		return NULL;

	v_map->vsi_list_id = vsi_list_id;
	v_map->ref_cnt = 1;
	for (i = 0; i < num_vsi; i++)
		set_bit(vsi_handle_arr[i], v_map->vsi_map);

	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
	return v_map;
}

/**
 * ice_update_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 *
 * Call AQ command to add a new switch rule or
update existing switch rule 1872 * using the given VSI list ID 1873 */ 1874 static int 1875 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, 1876 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc, 1877 enum ice_sw_lkup_type lkup_type) 1878 { 1879 struct ice_aqc_sw_rules_elem *s_rule; 1880 u16 s_rule_size; 1881 u16 rule_type; 1882 int status; 1883 int i; 1884 1885 if (!num_vsi) 1886 return -EINVAL; 1887 1888 if (lkup_type == ICE_SW_LKUP_MAC || 1889 lkup_type == ICE_SW_LKUP_MAC_VLAN || 1890 lkup_type == ICE_SW_LKUP_ETHERTYPE || 1891 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || 1892 lkup_type == ICE_SW_LKUP_PROMISC || 1893 lkup_type == ICE_SW_LKUP_PROMISC_VLAN) 1894 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR : 1895 ICE_AQC_SW_RULES_T_VSI_LIST_SET; 1896 else if (lkup_type == ICE_SW_LKUP_VLAN) 1897 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR : 1898 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET; 1899 else 1900 return -EINVAL; 1901 1902 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi); 1903 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); 1904 if (!s_rule) 1905 return -ENOMEM; 1906 for (i = 0; i < num_vsi; i++) { 1907 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) { 1908 status = -EINVAL; 1909 goto exit; 1910 } 1911 /* AQ call requires hw_vsi_id(s) */ 1912 s_rule->pdata.vsi_list.vsi[i] = 1913 cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i])); 1914 } 1915 1916 s_rule->type = cpu_to_le16(rule_type); 1917 s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi); 1918 s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); 1919 1920 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL); 1921 1922 exit: 1923 devm_kfree(ice_hw_to_dev(hw), s_rule); 1924 return status; 1925 } 1926 1927 /** 1928 * ice_create_vsi_list_rule - Creates and populates a VSI list rule 1929 * @hw: pointer to the HW struct 1930 * @vsi_handle_arr: array of VSI handles to form a VSI list 1931 * @num_vsi: number of VSI handles in 
the array 1932 * @vsi_list_id: stores the ID of the VSI list to be created 1933 * @lkup_type: switch rule filter's lookup type 1934 */ 1935 static int 1936 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, 1937 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type) 1938 { 1939 int status; 1940 1941 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type, 1942 ice_aqc_opc_alloc_res); 1943 if (status) 1944 return status; 1945 1946 /* Update the newly created VSI list to include the specified VSIs */ 1947 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi, 1948 *vsi_list_id, false, 1949 ice_aqc_opc_add_sw_rules, lkup_type); 1950 } 1951 1952 /** 1953 * ice_create_pkt_fwd_rule 1954 * @hw: pointer to the hardware structure 1955 * @f_entry: entry containing packet forwarding information 1956 * 1957 * Create switch rule with given filter information and add an entry 1958 * to the corresponding filter management list to track this switch rule 1959 * and VSI mapping 1960 */ 1961 static int 1962 ice_create_pkt_fwd_rule(struct ice_hw *hw, 1963 struct ice_fltr_list_entry *f_entry) 1964 { 1965 struct ice_fltr_mgmt_list_entry *fm_entry; 1966 struct ice_aqc_sw_rules_elem *s_rule; 1967 enum ice_sw_lkup_type l_type; 1968 struct ice_sw_recipe *recp; 1969 int status; 1970 1971 s_rule = devm_kzalloc(ice_hw_to_dev(hw), 1972 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); 1973 if (!s_rule) 1974 return -ENOMEM; 1975 fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry), 1976 GFP_KERNEL); 1977 if (!fm_entry) { 1978 status = -ENOMEM; 1979 goto ice_create_pkt_fwd_rule_exit; 1980 } 1981 1982 fm_entry->fltr_info = f_entry->fltr_info; 1983 1984 /* Initialize all the fields for the management entry */ 1985 fm_entry->vsi_count = 1; 1986 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX; 1987 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID; 1988 fm_entry->counter_index = ICE_INVAL_COUNTER_ID; 1989 1990 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule, 1991 
ice_aqc_opc_add_sw_rules); 1992 1993 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, 1994 ice_aqc_opc_add_sw_rules, NULL); 1995 if (status) { 1996 devm_kfree(ice_hw_to_dev(hw), fm_entry); 1997 goto ice_create_pkt_fwd_rule_exit; 1998 } 1999 2000 f_entry->fltr_info.fltr_rule_id = 2001 le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); 2002 fm_entry->fltr_info.fltr_rule_id = 2003 le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); 2004 2005 /* The book keeping entries will get removed when base driver 2006 * calls remove filter AQ command 2007 */ 2008 l_type = fm_entry->fltr_info.lkup_type; 2009 recp = &hw->switch_info->recp_list[l_type]; 2010 list_add(&fm_entry->list_entry, &recp->filt_rules); 2011 2012 ice_create_pkt_fwd_rule_exit: 2013 devm_kfree(ice_hw_to_dev(hw), s_rule); 2014 return status; 2015 } 2016 2017 /** 2018 * ice_update_pkt_fwd_rule 2019 * @hw: pointer to the hardware structure 2020 * @f_info: filter information for switch rule 2021 * 2022 * Call AQ command to update a previously created switch rule with a 2023 * VSI list ID 2024 */ 2025 static int 2026 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) 2027 { 2028 struct ice_aqc_sw_rules_elem *s_rule; 2029 int status; 2030 2031 s_rule = devm_kzalloc(ice_hw_to_dev(hw), 2032 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); 2033 if (!s_rule) 2034 return -ENOMEM; 2035 2036 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules); 2037 2038 s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id); 2039 2040 /* Update switch rule with new rule set to forward VSI list */ 2041 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, 2042 ice_aqc_opc_update_sw_rules, NULL); 2043 2044 devm_kfree(ice_hw_to_dev(hw), s_rule); 2045 return status; 2046 } 2047 2048 /** 2049 * ice_update_sw_rule_bridge_mode 2050 * @hw: pointer to the HW struct 2051 * 2052 * Updates unicast switch filter rules based on VEB/VEPA mode 2053 */ 2054 int 
ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status = 0;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	mutex_lock(rule_lock);
	list_for_each_entry(fm_entry, rule_head, list_entry) {
		struct ice_fltr_info *fi = &fm_entry->fltr_info;
		u8 *addr = fi->l_data.mac.mac_addr;

		/* Update unicast Tx rules to reflect the selected
		 * VEB/VEPA mode
		 */
		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
		    (fi->fltr_act == ICE_FWD_TO_VSI ||
		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
		     fi->fltr_act == ICE_FWD_TO_Q ||
		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
			/* stop on the first AQ failure; status is returned */
			status = ice_update_pkt_fwd_rule(hw, fi);
			if (status)
				break;
		}
	}

	mutex_unlock(rule_lock);

	return status;
}

/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 * if only one VSI has been added till now
 *	Allocate a new VSI list and add two VSIs
 *	to this list using switch rule command
 *	Update the previously created switch rule with the
 *	newly created VSI list ID
 * if a VSI list was previously created
 *	Add the new VSI to the previously created VSI list set
 *	using the update switch rule command
 */
static int
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status = 0;

	/* A queue-forwarding rule cannot be converted to a VSI list */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return -EOPNOTSUPP;

	/* Likewise, cannot mix a new queue action into a VSI-based rule */
	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return -EEXIST;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		if (!m_entry->vsi_list_info)
			return -ENOMEM;

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		if (!m_entry->vsi_list_info)
			return -EIO;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}

/**
 * ice_find_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which the specified rule needs to be searched
 * @f_info: rule information
 *
 * Helper function to search for a given rule entry
 * Returns pointer to entry storing the rule if found
 */
static struct ice_fltr_mgmt_list_entry *
ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
{
	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct list_head *list_head;

	/* NOTE(review): callers are expected to hold the recipe's
	 * filt_rule_lock while walking this list — confirm at call sites.
	 */
	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		/* match on lookup data and rule direction flag only */
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->flag == list_itr->fltr_info.flag) {
			ret = list_itr;
			break;
		}
	}
	return ret;
}

/**
 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which VSI lists needs to be searched
 * @vsi_handle: VSI handle to be found in VSI list
 * @vsi_list_id: VSI list ID found containing vsi_handle
 *
 * Helper function to search a VSI list with single entry containing given VSI
 * handle element. This can be extended further to search VSI list with more
 * than 1 vsi_count. Returns pointer to VSI list entry if found.
 */
static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
			u16 *vsi_list_id)
{
	struct ice_vsi_list_map_info *map_info = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		/* only single-VSI lists are candidates for reuse */
		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
			map_info = list_itr->vsi_list_info;
			if (test_bit(vsi_handle, map_info->vsi_map)) {
				*vsi_list_id = map_info->vsi_list_id;
				return map_info;
			}
		}
	}
	return NULL;
}

/**
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type (recipe ID) for which rule has to be added
 * @f_entry: structure containing MAC forwarding information
 *
 * Adds or updates the rule lists for a given recipe
 */
static int
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
		      struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;

	mutex_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	/* source depends on rule direction: port for Rx, VSI for Tx */
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = hw->port_info->lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;

	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
	if (!m_entry) {
		/* no rule for this lookup data yet: drop the lock and
		 * create a brand-new forwarding rule
		 */
		mutex_unlock(rule_lock);
		return ice_create_pkt_fwd_rule(hw, f_entry);
	}

	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
	mutex_unlock(rule_lock);

	return status;
}

/**
 * ice_remove_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @lkup_type: switch rule filter lookup type
 *
 * The VSI list should be emptied before this function is called to remove the
 * VSI list.
 */
static int
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	u16 s_rule_size;
	int status;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;

	/* NOTE(review): s_rule is populated but never sent via
	 * ice_aq_sw_rules() here; only the list resource is freed below.
	 * This mirrors the allocation path — confirm intent upstream.
	 */
	s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);

	/* Free the vsi_list resource that we allocated. It is assumed that the
	 * list is empty at this point.
2339 */ 2340 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type, 2341 ice_aqc_opc_free_res); 2342 2343 devm_kfree(ice_hw_to_dev(hw), s_rule); 2344 return status; 2345 } 2346 2347 /** 2348 * ice_rem_update_vsi_list 2349 * @hw: pointer to the hardware structure 2350 * @vsi_handle: VSI handle of the VSI to remove 2351 * @fm_list: filter management entry for which the VSI list management needs to 2352 * be done 2353 */ 2354 static int 2355 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, 2356 struct ice_fltr_mgmt_list_entry *fm_list) 2357 { 2358 enum ice_sw_lkup_type lkup_type; 2359 u16 vsi_list_id; 2360 int status = 0; 2361 2362 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST || 2363 fm_list->vsi_count == 0) 2364 return -EINVAL; 2365 2366 /* A rule with the VSI being removed does not exist */ 2367 if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) 2368 return -ENOENT; 2369 2370 lkup_type = fm_list->fltr_info.lkup_type; 2371 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id; 2372 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true, 2373 ice_aqc_opc_update_sw_rules, 2374 lkup_type); 2375 if (status) 2376 return status; 2377 2378 fm_list->vsi_count--; 2379 clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map); 2380 2381 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) { 2382 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info; 2383 struct ice_vsi_list_map_info *vsi_list_info = 2384 fm_list->vsi_list_info; 2385 u16 rem_vsi_handle; 2386 2387 rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, 2388 ICE_MAX_VSI); 2389 if (!ice_is_vsi_valid(hw, rem_vsi_handle)) 2390 return -EIO; 2391 2392 /* Make sure VSI list is empty before removing it below */ 2393 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, 2394 vsi_list_id, true, 2395 ice_aqc_opc_update_sw_rules, 2396 lkup_type); 2397 if (status) 2398 return status; 2399 2400 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI; 2401 
tmp_fltr_info.fwd_id.hw_vsi_id = 2402 ice_get_hw_vsi_num(hw, rem_vsi_handle); 2403 tmp_fltr_info.vsi_handle = rem_vsi_handle; 2404 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info); 2405 if (status) { 2406 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n", 2407 tmp_fltr_info.fwd_id.hw_vsi_id, status); 2408 return status; 2409 } 2410 2411 fm_list->fltr_info = tmp_fltr_info; 2412 } 2413 2414 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) || 2415 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) { 2416 struct ice_vsi_list_map_info *vsi_list_info = 2417 fm_list->vsi_list_info; 2418 2419 /* Remove the VSI list since it is no longer used */ 2420 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); 2421 if (status) { 2422 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n", 2423 vsi_list_id, status); 2424 return status; 2425 } 2426 2427 list_del(&vsi_list_info->list_entry); 2428 devm_kfree(ice_hw_to_dev(hw), vsi_list_info); 2429 fm_list->vsi_list_info = NULL; 2430 } 2431 2432 return status; 2433 } 2434 2435 /** 2436 * ice_remove_rule_internal - Remove a filter rule of a given type 2437 * @hw: pointer to the hardware structure 2438 * @recp_id: recipe ID for which the rule needs to removed 2439 * @f_entry: rule entry containing filter information 2440 */ 2441 static int 2442 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, 2443 struct ice_fltr_list_entry *f_entry) 2444 { 2445 struct ice_switch_info *sw = hw->switch_info; 2446 struct ice_fltr_mgmt_list_entry *list_elem; 2447 struct mutex *rule_lock; /* Lock to protect filter rule list */ 2448 bool remove_rule = false; 2449 u16 vsi_handle; 2450 int status = 0; 2451 2452 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) 2453 return -EINVAL; 2454 f_entry->fltr_info.fwd_id.hw_vsi_id = 2455 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); 2456 2457 rule_lock = &sw->recp_list[recp_id].filt_rule_lock; 2458 
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = -ENOENT;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* not list-based: the rule itself can be removed directly */
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		status = -ENOENT;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
				      GFP_KERNEL);
		if (!s_rule) {
			status = -ENOMEM;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);

		if (status)
			goto exit;

		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_mac_fltr_exist - does this MAC filter exist for given VSI
 * @hw: pointer to the hardware structure
 * @mac: MAC address to be checked (for MAC filter)
 * @vsi_handle: check MAC filter for this VSI
 */
bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
{
	struct ice_fltr_mgmt_list_entry *entry;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 hw_vsi_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return false;

	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	sw = hw->switch_info;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
	if (!rule_head)
		return false;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	mutex_lock(rule_lock);
	list_for_each_entry(entry, rule_head, list_entry) {
		struct ice_fltr_info *f_info = &entry->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_zero_ether_addr(mac_addr))
			continue;

		/* only Tx, VSI-sourced, MAC-lookup, fwd-to-this-VSI rules
		 * are considered a match candidate
		 */
		if (f_info->flag != ICE_FLTR_TX ||
		    f_info->src_id != ICE_SRC_ID_VSI ||
		    f_info->lkup_type != ICE_SW_LKUP_MAC ||
		    f_info->fltr_act != ICE_FWD_TO_VSI ||
		    hw_vsi_id != f_info->fwd_id.hw_vsi_id)
			continue;

		if (ether_addr_equal(mac, mac_addr)) {
			mutex_unlock(rule_lock);
			return true;
		}
	}
	mutex_unlock(rule_lock);
	return false;
}

/**
 * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
 * @hw: pointer to the hardware structure
 * @vlan_id: VLAN ID
 * @vsi_handle: check MAC filter for this VSI
 */
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
{
	struct ice_fltr_mgmt_list_entry *entry;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 hw_vsi_id;

	if (vlan_id > ICE_MAX_VLAN_ID)
		return false;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return false;

	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	sw = hw->switch_info;
	rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	if (!rule_head)
		return false;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	list_for_each_entry(entry, rule_head, list_entry) {
		struct ice_fltr_info *f_info = &entry->fltr_info;
		u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
		struct ice_vsi_list_map_info *map_info;

		if (entry_vlan_id > ICE_MAX_VLAN_ID)
			continue;

		if (f_info->flag != ICE_FLTR_TX ||
		    f_info->src_id != ICE_SRC_ID_VSI ||
		    f_info->lkup_type != ICE_SW_LKUP_VLAN)
			continue;

		/* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
		if (f_info->fltr_act != ICE_FWD_TO_VSI &&
		    f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
			continue;

		if (f_info->fltr_act == ICE_FWD_TO_VSI) {
			if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
				continue;
		} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			/* If filter_action is FWD_TO_VSI_LIST, make sure
			 * that VSI being checked is part of VSI list
			 */
			if (entry->vsi_count == 1 &&
			    entry->vsi_list_info) {
				map_info = entry->vsi_list_info;
				if (!test_bit(vsi_handle, map_info->vsi_map))
					continue;
			}
		}

		if (vlan_id == entry_vlan_id) {
			mutex_unlock(rule_lock);
			return true;
		}
	}
	mutex_unlock(rule_lock);

	return false;
}

/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses,
the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
 */
int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct list_head *rule_head;
	u16 total_elem_left, s_rule_size;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 num_unicast = 0;
	int status = 0;
	u8 elem_sent;

	if (!m_list || !hw)
		return -EINVAL;

	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	/* First pass: validate every entry; count exclusive unicast addresses
	 * (added in bulk below) and add multicast/shared-unicast entries
	 * immediately via the common path.
	 */
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return -EINVAL;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return -EINVAL;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
			return -EINVAL;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			mutex_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return -EEXIST;
			}
			mutex_unlock(rule_lock);
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	mutex_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = 0;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule) {
		status = -ENOMEM;
		goto ice_add_mac_exit;
	}

	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_unicast_ether_addr(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		/* cap each AQ call at what fits in one AQ buffer */
		elem_sent = min_t(u8, total_elem_left,
				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule ID based on the value returned from FW */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (is_unicast_ether_addr(mac_addr)) {
			f_info->fltr_rule_id =
				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = -ENOMEM;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */

			list_add(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	mutex_unlock(rule_lock);
	if (s_rule)
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 */
static int
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return -EINVAL;

	if (new_fltr->src_id !=
ICE_SRC_ID_VSI)
		return -EINVAL;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status) {
			/* re-look up the freshly added rule to get its
			 * management entry for book keeping
			 */
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = -ENOENT;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it used
		 * by one VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* If VLAN rule exists and VSI list being used by this rule is
		 * referenced by more than 1 VLAN rule. Then create a new VSI
		 * list appending previous VSI with new VSI and update existing
		 * VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = -EIO;
			goto exit;
		}

		cur_handle =
			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
				       ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = -EEXIST;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* before overriding VSI list map info. decrement ref_cnt of
		 * previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_add_vlan - Add VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 */
int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr;

	if (!v_list || !hw)
		return -EINVAL;

	list_for_each_entry(v_list_itr, v_list, list_entry) {
		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
			return -EINVAL;
		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
		/* per-entry status is recorded; stop on first failure */
		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_add_eth_mac - Add ethertype and MAC based filter rule
 * @hw: pointer to the hardware structure
 * @em_list: list of ether type MAC filter, MAC is optional
 *
 * This function requires the caller to populate the entries in
 * the filter list with the necessary fields (including flags to
 * indicate Tx or Rx rules).
2982 */ 2983 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) 2984 { 2985 struct ice_fltr_list_entry *em_list_itr; 2986 2987 if (!em_list || !hw) 2988 return -EINVAL; 2989 2990 list_for_each_entry(em_list_itr, em_list, list_entry) { 2991 enum ice_sw_lkup_type l_type = 2992 em_list_itr->fltr_info.lkup_type; 2993 2994 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && 2995 l_type != ICE_SW_LKUP_ETHERTYPE) 2996 return -EINVAL; 2997 2998 em_list_itr->status = ice_add_rule_internal(hw, l_type, 2999 em_list_itr); 3000 if (em_list_itr->status) 3001 return em_list_itr->status; 3002 } 3003 return 0; 3004 } 3005 3006 /** 3007 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule 3008 * @hw: pointer to the hardware structure 3009 * @em_list: list of ethertype or ethertype MAC entries 3010 */ 3011 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list) 3012 { 3013 struct ice_fltr_list_entry *em_list_itr, *tmp; 3014 3015 if (!em_list || !hw) 3016 return -EINVAL; 3017 3018 list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) { 3019 enum ice_sw_lkup_type l_type = 3020 em_list_itr->fltr_info.lkup_type; 3021 3022 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && 3023 l_type != ICE_SW_LKUP_ETHERTYPE) 3024 return -EINVAL; 3025 3026 em_list_itr->status = ice_remove_rule_internal(hw, l_type, 3027 em_list_itr); 3028 if (em_list_itr->status) 3029 return em_list_itr->status; 3030 } 3031 return 0; 3032 } 3033 3034 /** 3035 * ice_rem_sw_rule_info 3036 * @hw: pointer to the hardware structure 3037 * @rule_head: pointer to the switch list structure that we want to delete 3038 */ 3039 static void 3040 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head) 3041 { 3042 if (!list_empty(rule_head)) { 3043 struct ice_fltr_mgmt_list_entry *entry; 3044 struct ice_fltr_mgmt_list_entry *tmp; 3045 3046 list_for_each_entry_safe(entry, tmp, rule_head, list_entry) { 3047 list_del(&entry->list_entry); 3048 devm_kfree(ice_hw_to_dev(hw), entry); 3049 } 
	}
}

/**
 * ice_rem_adv_rule_info
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 */
static void
ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
{
	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
	struct ice_adv_fltr_mgmt_list_entry *lst_itr;

	if (list_empty(rule_head))
		return;

	list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
		list_del(&lst_itr->list_entry);
		/* advanced rules carry a separately allocated lookup array;
		 * free it before freeing the entry itself
		 */
		devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
		devm_kfree(ice_hw_to_dev(hw), lst_itr);
	}
}

/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add filter rule to set/unset given VSI as default VSI for the switch
 * (represented by swid)
 *
 * Return: 0 on success, -EINVAL for an invalid VSI handle, -ENOMEM on
 * allocation failure, or the status of the admin-queue rule operation.
 */
int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	enum ice_adminq_opc opcode;
	u16 s_rule_size;
	u16 hw_vsi_id;
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	/* an add carries the dummy ethernet header; a remove only needs the
	 * rule index, hence the smaller buffer
	 */
	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
		ICE_SW_RULE_RX_TX_NO_HDR_SIZE;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;

	memset(&f_info, 0, sizeof(f_info));

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	if (f_info.flag & ICE_FLTR_RX) {
		f_info.src = hw->port_info->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
		if (!set)
			/* removal reuses the rule ID recorded when the
			 * default Rx rule was added
			 */
			f_info.fltr_rule_id =
				hw->port_info->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_tx_vsi_rule_id;
	}

	if (set)
		opcode = ice_aqc_opc_add_sw_rules;
	else
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		goto out;
	/* update the cached default-VSI bookkeeping in port_info so a later
	 * clear (or re-add) can find the rule ID / VSI number
	 */
	if (set) {
		u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_rx_vsi_rule_id = index;
		}
	} else {
		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
		}
	}

out:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3162 * @hw: pointer to the hardware structure 3163 * @recp_id: lookup type for which the specified rule needs to be searched 3164 * @f_info: rule information 3165 * 3166 * Helper function to search for a unicast rule entry - this is to be used 3167 * to remove unicast MAC filter that is not shared with other VSIs on the 3168 * PF switch. 3169 * 3170 * Returns pointer to entry storing the rule if found 3171 */ 3172 static struct ice_fltr_mgmt_list_entry * 3173 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id, 3174 struct ice_fltr_info *f_info) 3175 { 3176 struct ice_switch_info *sw = hw->switch_info; 3177 struct ice_fltr_mgmt_list_entry *list_itr; 3178 struct list_head *list_head; 3179 3180 list_head = &sw->recp_list[recp_id].filt_rules; 3181 list_for_each_entry(list_itr, list_head, list_entry) { 3182 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, 3183 sizeof(f_info->l_data)) && 3184 f_info->fwd_id.hw_vsi_id == 3185 list_itr->fltr_info.fwd_id.hw_vsi_id && 3186 f_info->flag == list_itr->fltr_info.flag) 3187 return list_itr; 3188 } 3189 return NULL; 3190 } 3191 3192 /** 3193 * ice_remove_mac - remove a MAC address based filter rule 3194 * @hw: pointer to the hardware structure 3195 * @m_list: list of MAC addresses and forwarding information 3196 * 3197 * This function removes either a MAC filter rule or a specific VSI from a 3198 * VSI list for a multicast MAC address. 3199 * 3200 * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should 3201 * be aware that this call will only work if all the entries passed into m_list 3202 * were added previously. It will not attempt to do a partial remove of entries 3203 * that were found. 
3204 */ 3205 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) 3206 { 3207 struct ice_fltr_list_entry *list_itr, *tmp; 3208 struct mutex *rule_lock; /* Lock to protect filter rule list */ 3209 3210 if (!m_list) 3211 return -EINVAL; 3212 3213 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; 3214 list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { 3215 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type; 3216 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0]; 3217 u16 vsi_handle; 3218 3219 if (l_type != ICE_SW_LKUP_MAC) 3220 return -EINVAL; 3221 3222 vsi_handle = list_itr->fltr_info.vsi_handle; 3223 if (!ice_is_vsi_valid(hw, vsi_handle)) 3224 return -EINVAL; 3225 3226 list_itr->fltr_info.fwd_id.hw_vsi_id = 3227 ice_get_hw_vsi_num(hw, vsi_handle); 3228 if (is_unicast_ether_addr(add) && !hw->ucast_shared) { 3229 /* Don't remove the unicast address that belongs to 3230 * another VSI on the switch, since it is not being 3231 * shared... 
3232 */ 3233 mutex_lock(rule_lock); 3234 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC, 3235 &list_itr->fltr_info)) { 3236 mutex_unlock(rule_lock); 3237 return -ENOENT; 3238 } 3239 mutex_unlock(rule_lock); 3240 } 3241 list_itr->status = ice_remove_rule_internal(hw, 3242 ICE_SW_LKUP_MAC, 3243 list_itr); 3244 if (list_itr->status) 3245 return list_itr->status; 3246 } 3247 return 0; 3248 } 3249 3250 /** 3251 * ice_remove_vlan - Remove VLAN based filter rule 3252 * @hw: pointer to the hardware structure 3253 * @v_list: list of VLAN entries and forwarding information 3254 */ 3255 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) 3256 { 3257 struct ice_fltr_list_entry *v_list_itr, *tmp; 3258 3259 if (!v_list || !hw) 3260 return -EINVAL; 3261 3262 list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { 3263 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; 3264 3265 if (l_type != ICE_SW_LKUP_VLAN) 3266 return -EINVAL; 3267 v_list_itr->status = ice_remove_rule_internal(hw, 3268 ICE_SW_LKUP_VLAN, 3269 v_list_itr); 3270 if (v_list_itr->status) 3271 return v_list_itr->status; 3272 } 3273 return 0; 3274 } 3275 3276 /** 3277 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter 3278 * @fm_entry: filter entry to inspect 3279 * @vsi_handle: VSI handle to compare with filter info 3280 */ 3281 static bool 3282 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) 3283 { 3284 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI && 3285 fm_entry->fltr_info.vsi_handle == vsi_handle) || 3286 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST && 3287 fm_entry->vsi_list_info && 3288 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map)))); 3289 } 3290 3291 /** 3292 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list 3293 * @hw: pointer to the hardware structure 3294 * @vsi_handle: VSI handle to remove filters from 3295 * @vsi_list_head: pointer to the list to add entry to 3296 * 
@fi: pointer to fltr_info of filter entry to copy & add 3297 * 3298 * Helper function, used when creating a list of filters to remove from 3299 * a specific VSI. The entry added to vsi_list_head is a COPY of the 3300 * original filter entry, with the exception of fltr_info.fltr_act and 3301 * fltr_info.fwd_id fields. These are set such that later logic can 3302 * extract which VSI to remove the fltr from, and pass on that information. 3303 */ 3304 static int 3305 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, 3306 struct list_head *vsi_list_head, 3307 struct ice_fltr_info *fi) 3308 { 3309 struct ice_fltr_list_entry *tmp; 3310 3311 /* this memory is freed up in the caller function 3312 * once filters for this VSI are removed 3313 */ 3314 tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL); 3315 if (!tmp) 3316 return -ENOMEM; 3317 3318 tmp->fltr_info = *fi; 3319 3320 /* Overwrite these fields to indicate which VSI to remove filter from, 3321 * so find and remove logic can extract the information from the 3322 * list entries. Note that original entries will still have proper 3323 * values. 3324 */ 3325 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; 3326 tmp->fltr_info.vsi_handle = vsi_handle; 3327 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 3328 3329 list_add(&tmp->list_entry, vsi_list_head); 3330 3331 return 0; 3332 } 3333 3334 /** 3335 * ice_add_to_vsi_fltr_list - Add VSI filters to the list 3336 * @hw: pointer to the hardware structure 3337 * @vsi_handle: VSI handle to remove filters from 3338 * @lkup_list_head: pointer to the list that has certain lookup type filters 3339 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle 3340 * 3341 * Locates all filters in lkup_list_head that are used by the given VSI, 3342 * and adds COPIES of those entries to vsi_list_head (intended to be used 3343 * to remove the listed filters). 
3344 * Note that this means all entries in vsi_list_head must be explicitly 3345 * deallocated by the caller when done with list. 3346 */ 3347 static int 3348 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, 3349 struct list_head *lkup_list_head, 3350 struct list_head *vsi_list_head) 3351 { 3352 struct ice_fltr_mgmt_list_entry *fm_entry; 3353 int status = 0; 3354 3355 /* check to make sure VSI ID is valid and within boundary */ 3356 if (!ice_is_vsi_valid(hw, vsi_handle)) 3357 return -EINVAL; 3358 3359 list_for_each_entry(fm_entry, lkup_list_head, list_entry) { 3360 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle)) 3361 continue; 3362 3363 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, 3364 vsi_list_head, 3365 &fm_entry->fltr_info); 3366 if (status) 3367 return status; 3368 } 3369 return status; 3370 } 3371 3372 /** 3373 * ice_determine_promisc_mask 3374 * @fi: filter info to parse 3375 * 3376 * Helper function to determine which ICE_PROMISC_ mask corresponds 3377 * to given filter into. 3378 */ 3379 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi) 3380 { 3381 u16 vid = fi->l_data.mac_vlan.vlan_id; 3382 u8 *macaddr = fi->l_data.mac.mac_addr; 3383 bool is_tx_fltr = false; 3384 u8 promisc_mask = 0; 3385 3386 if (fi->flag == ICE_FLTR_TX) 3387 is_tx_fltr = true; 3388 3389 if (is_broadcast_ether_addr(macaddr)) 3390 promisc_mask |= is_tx_fltr ? 3391 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX; 3392 else if (is_multicast_ether_addr(macaddr)) 3393 promisc_mask |= is_tx_fltr ? 3394 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX; 3395 else if (is_unicast_ether_addr(macaddr)) 3396 promisc_mask |= is_tx_fltr ? 3397 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX; 3398 if (vid) 3399 promisc_mask |= is_tx_fltr ? 
			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;

	return promisc_mask;
}

/**
 * ice_remove_promisc - Remove promisc based filter rules
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @v_list: list of promisc entries
 */
static int
ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr, *tmp;

	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
		v_list_itr->status =
			ice_remove_rule_internal(hw, recp_id, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 */
int
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		      u16 vid)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct list_head remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status = 0;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* VLAN promisc bits are tracked under a dedicated recipe */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	/* collect matching rules into a private list under the lock, then
	 * remove them after dropping it
	 */
	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			mutex_unlock(rule_lock);
			goto free_fltr_list;
		}
	}
	mutex_unlock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	/* the copies made by ice_add_entry_to_vsi_fltr_list() are owned
	 * here and must always be freed
	 */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}

	return status;
}

/**
 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 */
int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	bool is_tx_fltr;
	int status = 0;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&new_fltr, 0, sizeof(new_fltr));

	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		/* consume exactly one direction/type bit per iteration */
		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			eth_broadcast_addr(mac_addr);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ether_addr_copy(mac_addr, dummy_eth_header);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1; /* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = hw->port_info->lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
		if (status)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}

/**
 * ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 */
int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct list_head vsi_list_head;
	struct list_head *vlan_head;
	struct mutex *vlan_lock; /* Lock to protect filter rule list */
	u16 vlan_id;
	int status;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	/* snapshot this VSI's VLAN filters under the lock, then apply the
	 * promisc change per VLAN without holding it
	 */
	mutex_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	mutex_unlock(vlan_lock);
	if (status)
		goto free_fltr_list;

	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
		else
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);
		if (status)
			break;
	}

free_fltr_list:
	/* free the snapshot copies regardless of success or failure */
	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
		list_del(&list_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_itr);
	}
	return status;
}

/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct list_head *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	/* copy the VSI's filters of this lookup type under the lock, then
	 * dispatch to the matching remove helper
	 */
	mutex_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	mutex_unlock(rule_lock);
	if (status)
		goto free_fltr_list;

	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_LAST:
	default:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
		break;
	}

free_fltr_list:
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}
}

/**
 * ice_remove_vsi_fltr - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 */
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
{
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
}

/**
 * ice_alloc_res_cntr - allocating resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries requested for FD resource type
 * @counter_id: counter index returned by AQ call
 */
int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
		   u16 *counter_id)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	u16 buf_len;
	int status;

	/* Allocate resource */
	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->num_elems = cpu_to_le16(num_items);
	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto exit;

	/* FW returns the allocated counter index in the response element */
	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);

exit:
	kfree(buf);
	return status;
}

/**
 * ice_free_res_cntr - free resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries to be freed for FD resource type
 * @counter_id: counter ID resource which needs to be freed
 */
int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
		  u16 counter_id)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	u16 buf_len;
	int status;

	/* Free resource */
	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->num_elems = cpu_to_le16(num_items);
	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);
	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");

	kfree(buf);
	return status;
}

/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 2, 0 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
};

/* Maps each SW protocol type to the HW protocol ID used in field vectors.
 * Not const: ice_change_proto_id_to_dvm() rewrites the ICE_VLAN_OFOS entry.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
};

/**
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 * @tun_type: type of recipe tunnel
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 */
static u16
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
	      enum ice_sw_tunnel_type tun_type)
{
	bool refresh_required = true;
	struct ice_sw_recipe *recp;
	u8 i;

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,
						&refresh_required))
				continue;

		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)
			continue;

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
			struct ice_fv_word *be = lkup_exts->fv_words;
			u16 *cr = recp[i].lkup_exts.field_mask;
			u16 *de = lkup_exts->field_mask;
			bool found = true;
			u8 pe, qr;

			/* ar, cr, and qr are related to the recipe words, while
			 * be, de, and pe are related to the lookup words
			 */
			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
				     qr++) {
					if (ar[qr].off == be[pe].off &&
					    ar[qr].prot_id == be[pe].prot_id &&
					    cr[qr] == de[pe])
						/* Found the "pe"th word in the
						 * given recipe
						 */
						break;
				}
				/* After walking through all the words in the
				 * "i"th recipe if "p"th word was not found then
				 * this recipe is not what we are looking for.
				 * So break out from this loop and try the next
				 * recipe
				 */
				if (qr >= recp[i].lkup_exts.n_val_words) {
					found = false;
					break;
				}
			}
			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 * Also tun type of recipe needs to be checked
			 */
			if (found && recp[i].tun_type == tun_type)
				return i; /* Return the recipe ID */
		}
	}
	return ICE_MAX_NUM_RECIPES;
}

/**
 * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
 *
 * As protocol id for outer vlan is different in dvm and svm, if dvm is
 * supported protocol array record for outer vlan has to be modified to
 * reflect the value proper for DVM.
 */
void ice_change_proto_id_to_dvm(void)
{
	u8 i;

	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
		if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
		    ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
			ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
}

/**
 * ice_prot_type_to_id - get protocol ID from protocol type
 * @type: protocol type
 * @id: pointer to variable that will receive the ID
 *
 * Returns true if found, false otherwise
 */
static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
{
	u8 i;

	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
		if (ice_prot_id_tbl[i].type == type) {
			*id = ice_prot_id_tbl[i].protocol_id;
			return true;
		}
	return false;
}

/**
 * ice_fill_valid_words - count valid words
 * @rule: advanced rule with lookup information
 * @lkup_exts: byte offset extractions of the words that are valid
 *
 * calculate valid words in a lookup rule using mask value
 */
static u8
ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
		     struct ice_prot_lkup_ext *lkup_exts)
{
	u8 j, word, prot_id, ret_val;

	if
 (!ice_prot_type_to_id(rule->type, &prot_id))
		return 0;

	word = lkup_exts->n_val_words;

	/* each nonzero 16-bit mask word becomes one extraction entry */
	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
		if (((u16 *)&rule->m_u)[j] &&
		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
			/* No more space to accommodate */
			if (word >= ICE_MAX_CHAIN_WORDS)
				return 0;
			lkup_exts->fv_words[word].off =
				ice_prot_ext[rule->type].offs[j];
			lkup_exts->fv_words[word].prot_id =
				ice_prot_id_tbl[rule->type].protocol_id;
			lkup_exts->field_mask[word] =
				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
			word++;
		}

	/* number of words added by this rule */
	ret_val = word - lkup_exts->n_val_words;
	lkup_exts->n_val_words = word;

	return ret_val;
}

/**
 * ice_create_first_fit_recp_def - Create a recipe grouping
 * @hw: pointer to the hardware structure
 * @lkup_exts: an array of protocol header extractions
 * @rg_list: pointer to a list that stores new recipe groups
 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
 *
 * Using first fit algorithm, take all the words that are still not done
 * and start grouping them in 4-word groups. Each group makes up one
 * recipe.
 */
static int
ice_create_first_fit_recp_def(struct ice_hw *hw,
			      struct ice_prot_lkup_ext *lkup_exts,
			      struct list_head *rg_list,
			      u8 *recp_cnt)
{
	struct ice_pref_recipe_group *grp = NULL;
	u8 j;

	*recp_cnt = 0;

	/* Walk through every word in the rule to check if it is not done. If so
	 * then this word needs to be part of a new recipe.
	 */
	for (j = 0; j < lkup_exts->n_val_words; j++)
		if (!test_bit(j, lkup_exts->done)) {
			/* start a new group when none exists yet or the
			 * current one is full (ICE_NUM_WORDS_RECIPE pairs)
			 */
			if (!grp ||
			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
				struct ice_recp_grp_entry *entry;

				entry = devm_kzalloc(ice_hw_to_dev(hw),
						     sizeof(*entry),
						     GFP_KERNEL);
				if (!entry)
					return -ENOMEM;
				list_add(&entry->l_entry, rg_list);
				grp = &entry->r_group;
				(*recp_cnt)++;
			}

			grp->pairs[grp->n_val_pairs].prot_id =
				lkup_exts->fv_words[j].prot_id;
			grp->pairs[grp->n_val_pairs].off =
				lkup_exts->fv_words[j].off;
			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
			grp->n_val_pairs++;
		}

	return 0;
}

/**
 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
 * @hw: pointer to the hardware structure
 * @fv_list: field vector with the extraction sequence information
 * @rg_list: recipe groupings with protocol-offset pairs
 *
 * Helper function to fill in the field vector indices for protocol-offset
 * pairs. These indexes are then ultimately programmed into a recipe.
 */
static int
ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
		       struct list_head *rg_list)
{
	struct ice_sw_fv_list_entry *fv;
	struct ice_recp_grp_entry *rg;
	struct ice_fv_word *fv_ext;

	if (list_empty(fv_list))
		return 0;

	/* only the first field vector's extraction words are searched */
	fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
			      list_entry);
	fv_ext = fv->fv_ptr->ew;

	list_for_each_entry(rg, rg_list, l_entry) {
		u8 i;

		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
			struct ice_fv_word *pr;
			bool found = false;
			u16 mask;
			u8 j;

			pr = &rg->r_group.pairs[i];
			mask = rg->r_group.mask[i];

			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
				if (fv_ext[j].prot_id == pr->prot_id &&
				    fv_ext[j].off == pr->off) {
					found = true;

					/* Store index of field vector */
					rg->fv_idx[i] = j;
					rg->fv_mask[i] = mask;
					break;
				}

			/* Protocol/offset could not be found, caller gave an
			 * invalid pair
			 */
			if (!found)
				return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_find_free_recp_res_idx - find free result indexes for recipe
 * @hw: pointer to hardware structure
 * @profiles: bitmap of profiles that will be associated with the new recipe
 * @free_idx: pointer to variable to receive the free index bitmap
 *
 * The algorithm used here is:
 * 1. When creating a new recipe, create a set P which contains all
 *    Profiles that will be associated with our new recipe
 *
 * 2. For each Profile p in set P:
 *    a. Add all recipes associated with Profile p into set R
 *    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
 *	[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
 *	i. Or just assume they all have the same possible indexes:
 *	   44, 45, 46, 47
 *	   i.e., PossibleIndexes = 0x0000F00000000000
 *
 * 3.
For each Recipe r in set R:
 *    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
 *    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
 *
 * FreeIndexes will contain the bits indicating the indexes free for use,
 * then the code needs to update the recipe[r].used_result_idx_bits to
 * indicate which indexes were selected for use by this recipe.
 */
static u16
ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
			   unsigned long *free_idx)
{
	DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
	DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
	DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
	u16 bit;

	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);

	/* Start from "all indexes possible"; each profile narrows this down */
	bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);

	/* For each profile we are going to associate the recipe with, add the
	 * recipes that are associated with that profile. This will give us
	 * the set of recipes that our recipe may collide with. Also, determine
	 * what possible result indexes are usable given this set of profiles.
	 */
	for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
		bitmap_or(recipes, recipes, profile_to_recipe[bit],
			  ICE_MAX_NUM_RECIPES);
		bitmap_and(possible_idx, possible_idx,
			   hw->switch_info->prof_res_bm[bit],
			   ICE_MAX_FV_WORDS);
	}

	/* For each recipe that our new recipe may collide with, determine
	 * which indexes have been used.
	 */
	for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
		bitmap_or(used_idx, used_idx,
			  hw->switch_info->recp_list[bit].res_idxs,
			  ICE_MAX_FV_WORDS);

	/* used_idx is a subset of possible_idx here, so XOR yields
	 * possible-but-unused indexes
	 */
	bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);

	/* return number of free indexes */
	return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
}

/**
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @profiles: bitmap of profiles that will be associated.
 */
static int
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  unsigned long *profiles)
{
	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	u16 free_res_idx;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;
	int status;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	/* check number of free result indices */
	bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		if (rm->n_grp_count > free_res_idx)
			return -ENOSPC;

		/* account for the extra chaining recipe */
		rm->n_grp_count++;
	}

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
		return -ENOSPC;

	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
			   GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err_mem;
	}

	bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	/* Read existing recipes from firmware; tmp[0] is reused below as a
	 * template for each newly allocated recipe.
	 */
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
				   NULL);
	if (status || recipe_count == 0)
		goto err_unroll;

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		u8 i;

		status = ice_alloc_recipe(hw, &entry->rid);
		if (status)
			goto err_unroll;

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		buf[recps].content.rid = 0;
		memset(&buf[recps].content.lkup_indx, 0,
		       sizeof(buf[recps].content.lkup_indx));

		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 * to be 0
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;
		}

		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] =
				cpu_to_le16(entry->fv_mask[i]);
		}

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			 * that can be used.
			 */
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
				status = -ENOSPC;
				goto err_unroll;
			}

			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			/* consume this result index and move to the next one */
			clear_bit(chain_idx, result_idx_bm);
			chain_idx = find_first_bit(result_idx_bm,
						   ICE_MAX_FV_WORDS);
		}

		/* fill recipe dependencies */
		bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
			    ICE_MAX_NUM_RECIPES);
		set_bit(buf[recps].recipe_indx,
			(unsigned long *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
		recps++;
	}

	if (rm->n_grp_count == 1) {
		rm->root_rid = buf[0].recipe_indx;
		set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[0].recipe_bitmap));
		} else {
			status = -EINVAL;
			goto err_unroll;
		}
		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if
specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referreing newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
	} else {
		struct ice_recp_grp_entry *last_chain_entry;
		u16 rid, i;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);
		if (status)
			goto err_unroll;

		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*last_chain_entry),
						GFP_KERNEL);
		if (!last_chain_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}
		last_chain_entry->rid = rid;
		memset(&buf[recps].content.lkup_indx, 0,
		       sizeof(buf[recps].content.lkup_indx));
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;
		}

		/* index 0 is the switch-ID match; chained results start at 1 */
		i = 1;
		/* update r_bitmap with the recp that is used for chaining */
		set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		list_for_each_entry(entry, &rm->rg_list, l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
			set_bit(entry->rid, rm->r_bitmap);
		}
		list_add(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[recps].recipe_bitmap));
		} else {
			status = -EINVAL;
			goto err_unroll;
		}
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

		recps++;
		rm->root_rid = (u8)rid;
	}
	/* Program all recipes to firmware under the change lock */
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		goto err_unroll;

	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);
	if (status)
		goto err_unroll;

	/* Every recipe that just got created add it to the recipe
	 * book keeping list
	 */
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {
buf_idx = idx;
				idx_found = true;
			}

		if (!idx_found) {
			status = -EIO;
			goto err_unroll;
		}

		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;

		recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);

		memcpy(&recp->ext_words, entry->r_group.pairs,
		       entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));

		memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
		       sizeof(recp->r_bitmap));

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
		if (!is_root)
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->recp_created = true;
	}
	/* buf ownership transfers to rm on success; freed via rm->root_buf */
	rm->root_buf = buf;
	kfree(tmp);
	return status;

err_unroll:
err_mem:
	kfree(tmp);
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_create_recipe_group - creates recipe group
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @lkup_exts: lookup elements
 */
static int
ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
			struct ice_prot_lkup_ext *lkup_exts)
{
	u8 recp_count = 0;
	int status;

	rm->n_grp_count = 0;

	/* Create recipes for words that are marked not done by packing them
	 * as best fit.
	 */
	status = ice_create_first_fit_recp_def(hw, lkup_exts,
					       &rm->rg_list, &recp_count);
	if (!status) {
		rm->n_grp_count += recp_count;
		rm->n_ext_words = lkup_exts->n_val_words;
		memcpy(&rm->ext_words, lkup_exts->fv_words,
		       sizeof(rm->ext_words));
		memcpy(rm->word_masks, lkup_exts->field_mask,
		       sizeof(rm->word_masks));
	}

	return status;
}

/**
 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @bm: bitmap of field vectors to consider
 * @fv_list: pointer to a list that holds the returned field vectors
 */
static int
ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
	   unsigned long *bm, struct list_head *fv_list)
{
	u8 *prot_ids;
	int status;
	u16 i;

	prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
	if (!prot_ids)
		return -ENOMEM;

	/* Translate each lookup type to its hardware protocol ID first */
	for (i = 0; i < lkups_cnt; i++)
		if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
			status = -EIO;
			goto free_mem;
		}

	/* Find field vectors that include all specified protocol types */
	status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);

free_mem:
	kfree(prot_ids);
	return status;
}

/**
 * ice_tun_type_match_word - determine if tun type needs a match mask
 * @tun_type: tunnel type
 * @mask: mask to be used for the tunnel
 *
 * Returns true and sets *mask for tunnel types that must match the tunnel
 * flag in packet metadata; otherwise clears *mask and returns false.
 */
static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
{
	switch (tun_type) {
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_VXLAN:
	case ICE_SW_TUN_NVGRE:
		*mask = ICE_TUN_FLAG_MASK;
		return true;

	default:
		*mask = 0;
		return false;
	}
}

/**
 * ice_add_special_words - Add words that are not protocols, such as metadata
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @lkup_exts: lookup word structure
 */
static int
ice_add_special_words(struct ice_adv_rule_info *rinfo,
		      struct ice_prot_lkup_ext *lkup_exts)
{
	u16 mask;

	/* If this is a tunneled packet, then add recipe index to match the
	 * tunnel bit in the packet metadata flags.
	 */
	if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
			u8 word = lkup_exts->n_val_words++;

			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
			lkup_exts->field_mask[word] = mask;
		} else {
			return -ENOSPC;
		}
	}

	return 0;
}

/* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
 * @hw: pointer to hardware structure
 * @rinfo: other information regarding the rule e.g.
priority and action info 4596 * @bm: pointer to memory for returning the bitmap of field vectors 4597 */ 4598 static void 4599 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, 4600 unsigned long *bm) 4601 { 4602 enum ice_prof_type prof_type; 4603 4604 bitmap_zero(bm, ICE_MAX_NUM_PROFILES); 4605 4606 switch (rinfo->tun_type) { 4607 case ICE_NON_TUN: 4608 prof_type = ICE_PROF_NON_TUN; 4609 break; 4610 case ICE_ALL_TUNNELS: 4611 prof_type = ICE_PROF_TUN_ALL; 4612 break; 4613 case ICE_SW_TUN_GENEVE: 4614 case ICE_SW_TUN_VXLAN: 4615 prof_type = ICE_PROF_TUN_UDP; 4616 break; 4617 case ICE_SW_TUN_NVGRE: 4618 prof_type = ICE_PROF_TUN_GRE; 4619 break; 4620 default: 4621 prof_type = ICE_PROF_ALL; 4622 break; 4623 } 4624 4625 ice_get_sw_fv_bitmap(hw, prof_type, bm); 4626 } 4627 4628 /** 4629 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default 4630 * @hw: pointer to hardware structure 4631 * @lkups: lookup elements or match criteria for the advanced recipe, one 4632 * structure per protocol header 4633 * @lkups_cnt: number of protocols 4634 * @rinfo: other information regarding the rule e.g. 
priority and action info 4635 * @rid: return the recipe ID of the recipe created 4636 */ 4637 static int 4638 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, 4639 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid) 4640 { 4641 DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES); 4642 DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES); 4643 struct ice_prot_lkup_ext *lkup_exts; 4644 struct ice_recp_grp_entry *r_entry; 4645 struct ice_sw_fv_list_entry *fvit; 4646 struct ice_recp_grp_entry *r_tmp; 4647 struct ice_sw_fv_list_entry *tmp; 4648 struct ice_sw_recipe *rm; 4649 int status = 0; 4650 u8 i; 4651 4652 if (!lkups_cnt) 4653 return -EINVAL; 4654 4655 lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL); 4656 if (!lkup_exts) 4657 return -ENOMEM; 4658 4659 /* Determine the number of words to be matched and if it exceeds a 4660 * recipe's restrictions 4661 */ 4662 for (i = 0; i < lkups_cnt; i++) { 4663 u16 count; 4664 4665 if (lkups[i].type >= ICE_PROTOCOL_LAST) { 4666 status = -EIO; 4667 goto err_free_lkup_exts; 4668 } 4669 4670 count = ice_fill_valid_words(&lkups[i], lkup_exts); 4671 if (!count) { 4672 status = -EIO; 4673 goto err_free_lkup_exts; 4674 } 4675 } 4676 4677 rm = kzalloc(sizeof(*rm), GFP_KERNEL); 4678 if (!rm) { 4679 status = -ENOMEM; 4680 goto err_free_lkup_exts; 4681 } 4682 4683 /* Get field vectors that contain fields extracted from all the protocol 4684 * headers being programmed. 4685 */ 4686 INIT_LIST_HEAD(&rm->fv_list); 4687 INIT_LIST_HEAD(&rm->rg_list); 4688 4689 /* Get bitmap of field vectors (profiles) that are compatible with the 4690 * rule request; only these will be searched in the subsequent call to 4691 * ice_get_fv. 
4692 */ 4693 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap); 4694 4695 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list); 4696 if (status) 4697 goto err_unroll; 4698 4699 /* Create any special protocol/offset pairs, such as looking at tunnel 4700 * bits by extracting metadata 4701 */ 4702 status = ice_add_special_words(rinfo, lkup_exts); 4703 if (status) 4704 goto err_free_lkup_exts; 4705 4706 /* Group match words into recipes using preferred recipe grouping 4707 * criteria. 4708 */ 4709 status = ice_create_recipe_group(hw, rm, lkup_exts); 4710 if (status) 4711 goto err_unroll; 4712 4713 /* set the recipe priority if specified */ 4714 rm->priority = (u8)rinfo->priority; 4715 4716 /* Find offsets from the field vector. Pick the first one for all the 4717 * recipes. 4718 */ 4719 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list); 4720 if (status) 4721 goto err_unroll; 4722 4723 /* get bitmap of all profiles the recipe will be associated with */ 4724 bitmap_zero(profiles, ICE_MAX_NUM_PROFILES); 4725 list_for_each_entry(fvit, &rm->fv_list, list_entry) { 4726 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id); 4727 set_bit((u16)fvit->profile_id, profiles); 4728 } 4729 4730 /* Look for a recipe which matches our requested fv / mask list */ 4731 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type); 4732 if (*rid < ICE_MAX_NUM_RECIPES) 4733 /* Success if found a recipe that match the existing criteria */ 4734 goto err_unroll; 4735 4736 rm->tun_type = rinfo->tun_type; 4737 /* Recipe we need does not exist, add a recipe */ 4738 status = ice_add_sw_recipe(hw, rm, profiles); 4739 if (status) 4740 goto err_unroll; 4741 4742 /* Associate all the recipes created with all the profiles in the 4743 * common field vector. 
4744 */ 4745 list_for_each_entry(fvit, &rm->fv_list, list_entry) { 4746 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); 4747 u16 j; 4748 4749 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id, 4750 (u8 *)r_bitmap, NULL); 4751 if (status) 4752 goto err_unroll; 4753 4754 bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap, 4755 ICE_MAX_NUM_RECIPES); 4756 status = ice_acquire_change_lock(hw, ICE_RES_WRITE); 4757 if (status) 4758 goto err_unroll; 4759 4760 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id, 4761 (u8 *)r_bitmap, 4762 NULL); 4763 ice_release_change_lock(hw); 4764 4765 if (status) 4766 goto err_unroll; 4767 4768 /* Update profile to recipe bitmap array */ 4769 bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap, 4770 ICE_MAX_NUM_RECIPES); 4771 4772 /* Update recipe to profile bitmap array */ 4773 for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES) 4774 set_bit((u16)fvit->profile_id, recipe_to_profile[j]); 4775 } 4776 4777 *rid = rm->root_rid; 4778 memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts, 4779 sizeof(*lkup_exts)); 4780 err_unroll: 4781 list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) { 4782 list_del(&r_entry->l_entry); 4783 devm_kfree(ice_hw_to_dev(hw), r_entry); 4784 } 4785 4786 list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) { 4787 list_del(&fvit->list_entry); 4788 devm_kfree(ice_hw_to_dev(hw), fvit); 4789 } 4790 4791 if (rm->root_buf) 4792 devm_kfree(ice_hw_to_dev(hw), rm->root_buf); 4793 4794 kfree(rm); 4795 4796 err_free_lkup_exts: 4797 kfree(lkup_exts); 4798 4799 return status; 4800 } 4801 4802 /** 4803 * ice_find_dummy_packet - find dummy packet 4804 * 4805 * @lkups: lookup elements or match criteria for the advanced recipe, one 4806 * structure per protocol header 4807 * @lkups_cnt: number of protocols 4808 * @tun_type: tunnel type 4809 * @pkt: dummy packet to fill according to filter match criteria 4810 * @pkt_len: packet length of dummy packet 4811 * @offsets: pointer to receive 
the pointer to the offsets for the packet
 */
static void
ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
		      enum ice_sw_tunnel_type tun_type,
		      const u8 **pkt, u16 *pkt_len,
		      const struct ice_dummy_pkt_offsets **offsets)
{
	bool tcp = false, udp = false, ipv6 = false, vlan = false;
	u16 i;

	/* First pass: classify the requested match criteria */
	for (i = 0; i < lkups_cnt; i++) {
		if (lkups[i].type == ICE_UDP_ILOS)
			udp = true;
		else if (lkups[i].type == ICE_TCP_IL)
			tcp = true;
		else if (lkups[i].type == ICE_IPV6_OFOS)
			ipv6 = true;
		else if (lkups[i].type == ICE_VLAN_OFOS)
			vlan = true;
		else if (lkups[i].type == ICE_ETYPE_OL &&
			 lkups[i].h_u.ethertype.ethtype_id ==
				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
			 lkups[i].m_u.ethertype.ethtype_id ==
				cpu_to_be16(0xFFFF))
			/* fully-masked IPv6 ethertype also implies IPv6 */
			ipv6 = true;
	}

	/* Tunnel types take precedence over the L3/L4 flags */
	if (tun_type == ICE_SW_TUN_NVGRE) {
		if (tcp) {
			*pkt = dummy_gre_tcp_packet;
			*pkt_len = sizeof(dummy_gre_tcp_packet);
			*offsets = dummy_gre_tcp_packet_offsets;
			return;
		}

		*pkt = dummy_gre_udp_packet;
		*pkt_len = sizeof(dummy_gre_udp_packet);
		*offsets = dummy_gre_udp_packet_offsets;
		return;
	}

	if (tun_type == ICE_SW_TUN_VXLAN ||
	    tun_type == ICE_SW_TUN_GENEVE) {
		if (tcp) {
			*pkt = dummy_udp_tun_tcp_packet;
			*pkt_len = sizeof(dummy_udp_tun_tcp_packet);
			*offsets = dummy_udp_tun_tcp_packet_offsets;
			return;
		}

		*pkt = dummy_udp_tun_udp_packet;
		*pkt_len = sizeof(dummy_udp_tun_udp_packet);
		*offsets = dummy_udp_tun_udp_packet_offsets;
		return;
	}

	if (udp && !ipv6) {
		if (vlan) {
			*pkt = dummy_vlan_udp_packet;
			*pkt_len = sizeof(dummy_vlan_udp_packet);
			*offsets = dummy_vlan_udp_packet_offsets;
			return;
		}
		*pkt = dummy_udp_packet;
		*pkt_len = sizeof(dummy_udp_packet);
		*offsets = dummy_udp_packet_offsets;
		return;
	} else if (udp && ipv6) {
		if (vlan) {
			*pkt = dummy_vlan_udp_ipv6_packet;
			*pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
			*offsets = dummy_vlan_udp_ipv6_packet_offsets;
			return;
		}
		*pkt = dummy_udp_ipv6_packet;
		*pkt_len = sizeof(dummy_udp_ipv6_packet);
		*offsets = dummy_udp_ipv6_packet_offsets;
		return;
	} else if ((tcp && ipv6) || ipv6) {
		if (vlan) {
			*pkt = dummy_vlan_tcp_ipv6_packet;
			*pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
			*offsets = dummy_vlan_tcp_ipv6_packet_offsets;
			return;
		}
		*pkt = dummy_tcp_ipv6_packet;
		*pkt_len = sizeof(dummy_tcp_ipv6_packet);
		*offsets = dummy_tcp_ipv6_packet_offsets;
		return;
	}

	/* default: IPv4 TCP dummy packet */
	if (vlan) {
		*pkt = dummy_vlan_tcp_packet;
		*pkt_len = sizeof(dummy_vlan_tcp_packet);
		*offsets = dummy_vlan_tcp_packet_offsets;
	} else {
		*pkt = dummy_tcp_packet;
		*pkt_len = sizeof(dummy_tcp_packet);
		*offsets = dummy_tcp_packet_offsets;
	}
}

/**
 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
 *
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @s_rule: stores rule information from the match criteria
 * @dummy_pkt: dummy packet to fill according to filter match criteria
 * @pkt_len: packet length of dummy packet
 * @offsets: offset info for the dummy packet
 */
static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
			  struct ice_aqc_sw_rules_elem *s_rule,
			  const u8 *dummy_pkt, u16 pkt_len,
			  const struct ice_dummy_pkt_offsets *offsets)
{
	u8 *pkt;
	u16 i;

	/* Start with a packet with a pre-defined/dummy content. Then, fill
	 * in the header values to be looked up or matched.
	 */
	pkt = s_rule->pdata.lkup_tx_rx.hdr;

	memcpy(pkt, dummy_pkt, pkt_len);

	for (i = 0; i < lkups_cnt; i++) {
		enum ice_protocol_type type;
		u16 offset = 0, len = 0, j;
		bool found = false;

		/* find the start of this layer; it should be found since this
		 * was already checked when search for the dummy packet
		 */
		type = lkups[i].type;
		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
			if (type == offsets[j].type) {
				offset = offsets[j].offset;
				found = true;
				break;
			}
		}
		/* this should never happen in a correct calling sequence */
		if (!found)
			return -EINVAL;

		/* determine the header length to copy for this layer */
		switch (lkups[i].type) {
		case ICE_MAC_OFOS:
		case ICE_MAC_IL:
			len = sizeof(struct ice_ether_hdr);
			break;
		case ICE_ETYPE_OL:
			len = sizeof(struct ice_ethtype_hdr);
			break;
		case ICE_VLAN_OFOS:
			len = sizeof(struct ice_vlan_hdr);
			break;
		case ICE_IPV4_OFOS:
		case ICE_IPV4_IL:
			len = sizeof(struct ice_ipv4_hdr);
			break;
		case ICE_IPV6_OFOS:
		case ICE_IPV6_IL:
			len = sizeof(struct ice_ipv6_hdr);
			break;
		case ICE_TCP_IL:
		case ICE_UDP_OF:
		case ICE_UDP_ILOS:
			len = sizeof(struct ice_l4_hdr);
			break;
		case ICE_SCTP_IL:
			len = sizeof(struct ice_sctp_hdr);
			break;
		case ICE_NVGRE:
			len = sizeof(struct ice_nvgre_hdr);
			break;
		case ICE_VXLAN:
		case ICE_GENEVE:
			len = sizeof(struct ice_udp_tnl_hdr);
			break;
		default:
			return -EINVAL;
		}

		/* the length should be a word multiple */
		if (len % ICE_BYTES_PER_WORD)
			return -EIO;

		/* We have the offset to the header start, the length, the
		 * caller's header values and mask. Use this information to
		 * copy the data into the dummy packet appropriately based on
		 * the mask. Note that we need to only write the bits as
		 * indicated by the mask to make sure we don't improperly write
		 * over any significant packet data.
		 */
		for (j = 0; j < len / sizeof(u16); j++)
			if (((u16 *)&lkups[i].m_u)[j])
				((u16 *)(pkt + offset))[j] =
					(((u16 *)(pkt + offset))[j] &
					 ~((u16 *)&lkups[i].m_u)[j]) |
					(((u16 *)&lkups[i].h_u)[j] &
					 ((u16 *)&lkups[i].m_u)[j]);
	}

	s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);

	return 0;
}

/**
 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
 * @hw: pointer to the hardware structure
 * @tun_type: tunnel type
 * @pkt: dummy packet to fill in
 * @offsets: offset info for the dummy packet
 */
static int
ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
{
	u16 open_port, i;

	/* Only UDP-based tunnels need the currently open port patched in */
	switch (tun_type) {
	case ICE_SW_TUN_VXLAN:
		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
			return -EIO;
		break;
	case ICE_SW_TUN_GENEVE:
		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
			return -EIO;
		break;
	default:
		/* Nothing needs to be done for this tunnel type */
		return 0;
	}

	/* Find the outer UDP protocol header and insert the port number */
	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
		if (offsets[i].type == ICE_UDP_OF) {
			struct ice_l4_hdr *hdr;
			u16 offset;

			offset = offsets[i].offset;
			hdr = (struct ice_l4_hdr *)&pkt[offset];
			hdr->dst_port = cpu_to_be16(open_port);

			return 0;
		}
	}

	return -EIO;
}

/**
 * ice_find_adv_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number
of protocols
 * @recp_id: recipe ID for which we are finding the rule
 * @rinfo: other information regarding the rule e.g. priority and action info
 *
 * Helper function to search for a given advance rule entry
 * Returns pointer to entry storing the rule if found, NULL otherwise
 */
static struct ice_adv_fltr_mgmt_list_entry *
ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
			u16 lkups_cnt, u16 recp_id,
			struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_switch_info *sw = hw->switch_info;
	int i;

	list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
			    list_entry) {
		bool lkups_matched = true;

		/* a candidate matches only if it has the same number of
		 * lookups, every lookup is bytewise identical (and in the
		 * same order), and the action flag and tunnel type agree
		 */
		if (lkups_cnt != list_itr->lkups_cnt)
			continue;
		for (i = 0; i < list_itr->lkups_cnt; i++)
			if (memcmp(&list_itr->lkups[i], &lkups[i],
				   sizeof(*lkups))) {
				lkups_matched = false;
				break;
			}
		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
		    rinfo->tun_type == list_itr->rule_info.tun_type &&
		    lkups_matched)
			return list_itr;
	}
	return NULL;
}

/**
 * ice_adv_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current adv filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the booking keeping is described below :
 * When a VSI needs to subscribe to a given advanced filter
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static int
ice_adv_add_update_vsi_list(struct ice_hw *hw,
			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
			    struct ice_adv_rule_info *cur_fltr,
			    struct ice_adv_rule_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status;

	/* queue, queue-group and drop actions cannot be fanned out to a
	 * VSI list; only VSI-forwarding rules can be shared this way
	 */
	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
		return -EOPNOTSUPP;

	/* likewise a queue/queue-group action cannot be merged into an
	 * existing VSI or VSI-list forwarding rule
	 */
	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
5154 */ 5155 struct ice_fltr_info tmp_fltr; 5156 u16 vsi_handle_arr[2]; 5157 5158 /* A rule already exists with the new VSI being added */ 5159 if (cur_fltr->sw_act.fwd_id.hw_vsi_id == 5160 new_fltr->sw_act.fwd_id.hw_vsi_id) 5161 return -EEXIST; 5162 5163 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle; 5164 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle; 5165 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, 5166 &vsi_list_id, 5167 ICE_SW_LKUP_LAST); 5168 if (status) 5169 return status; 5170 5171 memset(&tmp_fltr, 0, sizeof(tmp_fltr)); 5172 tmp_fltr.flag = m_entry->rule_info.sw_act.flag; 5173 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id; 5174 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; 5175 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; 5176 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST; 5177 5178 /* Update the previous switch rule of "forward to VSI" to 5179 * "fwd to VSI list" 5180 */ 5181 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); 5182 if (status) 5183 return status; 5184 5185 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id; 5186 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST; 5187 m_entry->vsi_list_info = 5188 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, 5189 vsi_list_id); 5190 } else { 5191 u16 vsi_handle = new_fltr->sw_act.vsi_handle; 5192 5193 if (!m_entry->vsi_list_info) 5194 return -EIO; 5195 5196 /* A rule already exists with the new VSI being added */ 5197 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) 5198 return 0; 5199 5200 /* Update the previously created VSI list set with 5201 * the new VSI ID passed in 5202 */ 5203 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id; 5204 5205 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, 5206 vsi_list_id, false, 5207 ice_aqc_opc_update_sw_rules, 5208 ICE_SW_LKUP_LAST); 5209 /* update VSI list mapping info with new VSI ID */ 5210 if (!status) 5211 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map); 5212 } 5213 if (!status) 5214 m_entry->vsi_count++; 5215 return status; 5216 } 5217 
/**
 * ice_add_adv_rule - helper function to create an advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that need to be looked up. All words
 *	   together make one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: other information related to the rule that needs to be programmed
 * @added_entry: this will return recipe_id, rule_id and vsi_handle. Should be
 *		 ignored in case of error.
 *
 * This function can program only 1 rule at a time. The lkups is used to
 * describe all the words that form the "lookup" portion of the recipe.
 * These words can span multiple protocols. Callers to this function need to
 * pass in a list of protocol headers with lookup information along with a
 * mask that determines which words are valid from the given protocol header.
 * rinfo describes other information related to this rule such as forwarding
 * IDs, priority of this rule, etc.
 */
int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
	const struct ice_dummy_pkt_offsets *pkt_offsets;
	struct ice_aqc_sw_rules_elem *s_rule = NULL;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	const u8 *pkt = NULL;
	u16 word_cnt;
	u32 act = 0;
	int status;
	u8 q_rgn;

	/* Initialize profile to result index bitmap */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);
	}

	if (!lkups_cnt)
		return -EINVAL;

	/* get # of words we need to match: count the non-zero 16-bit words
	 * across all lookup masks; a recipe can chain at most
	 * ICE_MAX_CHAIN_WORDS of them
	 */
	word_cnt = 0;
	for (i = 0; i < lkups_cnt; i++) {
		u16 j, *ptr;

		ptr = (u16 *)&lkups[i].m_u;
		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
			if (ptr[j] != 0)
				word_cnt++;
	}

	if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
		return -EINVAL;

	/* make sure that we can locate a dummy packet */
	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
			      &pkt_offsets);
	if (!pkt) {
		status = -EINVAL;
		goto err_ice_add_adv_rule;
	}

	/* only the four actions below can be expressed as a single switch
	 * rule action
	 */
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
		return -EIO;

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* resolve the driver VSI handle to the HW VSI number where needed */
	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
	/* Tx rules use the originating VSI as the rule source */
	if (rinfo->sw_act.flag & ICE_FLTR_TX)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	if (status)
		return status;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	if (m_entry) {
		/* we have to add VSI to VSI_LIST and increment vsi_count.
		 * Also Update VSI list so that we can change forwarding rule
		 * if the rule already exists, we will check if it exists with
		 * same vsi_id, if not then add it to the VSI list if it already
		 * exists if not then create a VSI list and add the existing VSI
		 * ID and the new VSI ID to the list
		 * We will add that VSI to the list
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
						     rinfo);
		if (added_entry) {
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
		}
		return status;
	}
	/* no matching rule yet - build a new switch rule buffer sized for
	 * the dummy packet header
	 */
	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;
	if (!rinfo->flags_info.act_valid) {
		act |= ICE_SINGLE_ACT_LAN_ENABLE;
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	} else {
		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
						ICE_SINGLE_ACT_LB_ENABLE);
	}

	/* translate the requested filter action into the single-action
	 * bit layout of the switch rule
	 */
	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
	default:
		status = -EIO;
		goto err_ice_add_adv_rule;
	}

	/* set the rule LOOKUP type based on caller specified 'Rx'
	 * instead of hardcoding it to be either LOOKUP_TX/RX
	 *
	 * for 'Rx' set the source to be the port number
	 * for 'Tx' set the source to be the source HW VSI number (determined
	 * by caller)
	 */
	if (rinfo->rx) {
		s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->pdata.lkup_tx_rx.src =
			cpu_to_le16(hw->port_info->lport);
	} else {
		s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
	}

	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	/* lay the dummy packet into the rule and overwrite the fields the
	 * caller wants to match, then patch in the tunnel port if needed
	 */
	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
					   pkt_len, pkt_offsets);
	if (status)
		goto err_ice_add_adv_rule;

	if (rinfo->tun_type != ICE_NON_TUN) {
		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
						 s_rule->pdata.lkup_tx_rx.hdr,
						 pkt_offsets);
		if (status)
			goto err_ice_add_adv_rule;
	}

	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
				 NULL);
	if (status)
		goto err_ice_add_adv_rule;
	/* bookkeeping entry is devm-allocated: it lives for the life of the
	 * rule, unlike the temporary AQ buffer s_rule
	 */
	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
				sizeof(struct ice_adv_fltr_mgmt_list_entry),
				GFP_KERNEL);
	if (!adv_fltr) {
		status = -ENOMEM;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups =
		devm_kmemdup(ice_hw_to_dev(hw), lkups,
			     lkups_cnt * sizeof(*lkups), GFP_KERNEL);
	if (!adv_fltr->lkups) {
		status = -ENOMEM;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	adv_fltr->rule_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	list_add(&adv_fltr->list_entry, rule_head);
	if (added_entry) {
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	}
err_ice_add_adv_rule:
	/* on failure release the bookkeeping entry and its lookup copy;
	 * the temporary AQ buffer is freed on success and failure alike
	 */
	if (status && adv_fltr) {
		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
	}

	kfree(s_rule);

	return status;
}

/**
 * ice_replay_vsi_fltr - Replay filters for requested VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 *
 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
 * It is required to pass valid VSI handle.
 */
static int
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
		    struct list_head *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	int status = 0;
	u16 hw_vsi_id;

	if (list_empty(list_head))
		return status;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_for_each_entry(itr, list_head, list_entry) {
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		/* a rule subscribed to by a single VSI (and not a VLAN rule)
		 * is replayed directly with its stored filter info
		 */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status)
				goto end;
			continue;
		}
		if (!itr->vsi_list_info ||
		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
			continue;
		/* Clearing it so that the logic can add it back */
		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, &f_entry);
		else
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
		if (status)
			goto end;
	}
end:
	return status;
}

/**
 * ice_adv_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	     be done
 */
static int
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
	struct ice_vsi_list_map_info *vsi_list_info;
	enum
ice_sw_lkup_type lkup_type;
	u16 vsi_list_id;
	int status;

	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return -EINVAL;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return -ENOENT;

	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		struct ice_fltr_info tmp_fltr;
		u16 rem_vsi_handle;

		/* Only one VSI remains subscribed; convert the rule back
		 * from "forward to VSI list" to plain "forward to VSI" and
		 * release the now-unneeded VSI list below.
		 */
		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return -EIO;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;

		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI 
on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);
			return status;
		}
		fm_list->vsi_list_info->ref_cnt--;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}

/**
 * ice_rem_adv_rule - removes existing advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *	   together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: pointer to the rule information for the rule to be removed
 *
 * This function can be used to remove 1 rule at a time. The lkups is
 * used to describe all the words that forms the "lookup" portion of the
 * rule. These words can span multiple protocols. Callers to this function
 * need to pass in a list of protocol headers with lookup information along
 * and mask that determines which words are valid from the given protocol
 * header. rinfo describes other information related to this rule such as
 * forwarding IDs, priority of this rule, etc.
 */
static int
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	bool remove_rule = false;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 i, rid, vsi_handle;
	int status = 0;

	/* re-derive the recipe from the lookup words so the rule can be
	 * located in the bookkeeping lists
	 */
	memset(&lkup_exts, 0, sizeof(lkup_exts));
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
			return -EIO;

		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
		if (!count)
			return -EIO;
	}

	/* Create any special protocol/offset pairs, such as looking at tunnel
	 * bits by extracting metadata
	 */
	status = ice_add_special_words(rinfo, &lkup_exts);
	if (status)
		return status;

	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return -EINVAL;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	if (!list_elem)
		return 0;
	mutex_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
		remove_rule = true;
	} else if (list_elem->vsi_count > 1) {
		/* other VSIs still subscribe to the rule; only drop this
		 * VSI from the shared VSI list
		 */
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	} else {
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status) {
			mutex_unlock(rule_lock);
			return status;
		}
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}
	mutex_unlock(rule_lock);
	if (remove_rule) {
		struct ice_aqc_sw_rules_elem *s_rule;
		u16
rule_buf_sz;

		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
		if (!s_rule)
			return -ENOMEM;
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(list_elem->rule_info.fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 rule_buf_sz, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		/* -ENOENT means the rule is already gone from HW; still drop
		 * the bookkeeping entry in that case
		 */
		if (!status || status == -ENOENT) {
			struct ice_switch_info *sw = hw->switch_info;

			mutex_lock(rule_lock);
			list_del(&list_elem->list_entry);
			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
			devm_kfree(ice_hw_to_dev(hw), list_elem);
			mutex_unlock(rule_lock);
			if (list_empty(&sw->recp_list[rid].filt_rules))
				sw->recp_list[rid].adv_rule = false;
		}
		kfree(s_rule);
	}
	return status;
}

/**
 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
 * @hw: pointer to the hardware structure
 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
 *
 * This function is used to remove 1 rule at a time. The removal is based on
 * the remove_entry parameter.
This function will remove rule for a given
 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
 */
int
ice_rem_adv_rule_by_id(struct ice_hw *hw,
		       struct ice_rule_query_data *remove_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct list_head *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_switch_info *sw;

	sw = hw->switch_info;
	/* the recipe must exist before any of its rules can be removed */
	if (!sw->recp_list[remove_entry->rid].recp_created)
		return -EINVAL;
	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		if (list_itr->rule_info.fltr_rule_id != remove_entry->rule_id)
			continue;
		/* remove the rule on behalf of the requested VSI */
		rinfo = list_itr->rule_info;
		rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
		return ice_rem_adv_rule(hw, list_itr->lkups,
					list_itr->lkups_cnt, &rinfo);
	}
	/* either list is empty or unable to find rule */
	return -ENOENT;
}

/**
 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
 * given VSI handle
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
 *
 * This function is used to remove all the rules for a given VSI and as soon
 * as removing a rule fails, it will return immediately with the error code,
 * else it will return success.
5741 */ 5742 int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle) 5743 { 5744 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry; 5745 struct ice_vsi_list_map_info *map_info; 5746 struct ice_adv_rule_info rinfo; 5747 struct list_head *list_head; 5748 struct ice_switch_info *sw; 5749 int status; 5750 u8 rid; 5751 5752 sw = hw->switch_info; 5753 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) { 5754 if (!sw->recp_list[rid].recp_created) 5755 continue; 5756 if (!sw->recp_list[rid].adv_rule) 5757 continue; 5758 5759 list_head = &sw->recp_list[rid].filt_rules; 5760 list_for_each_entry_safe(list_itr, tmp_entry, list_head, 5761 list_entry) { 5762 rinfo = list_itr->rule_info; 5763 5764 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) { 5765 map_info = list_itr->vsi_list_info; 5766 if (!map_info) 5767 continue; 5768 5769 if (!test_bit(vsi_handle, map_info->vsi_map)) 5770 continue; 5771 } else if (rinfo.sw_act.vsi_handle != vsi_handle) { 5772 continue; 5773 } 5774 5775 rinfo.sw_act.vsi_handle = vsi_handle; 5776 status = ice_rem_adv_rule(hw, list_itr->lkups, 5777 list_itr->lkups_cnt, &rinfo); 5778 if (status) 5779 return status; 5780 } 5781 } 5782 return 0; 5783 } 5784 5785 /** 5786 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI 5787 * @hw: pointer to the hardware structure 5788 * @vsi_handle: driver VSI handle 5789 * @list_head: list for which filters need to be replayed 5790 * 5791 * Replay the advanced rule for the given VSI. 
5792 */ 5793 static int 5794 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle, 5795 struct list_head *list_head) 5796 { 5797 struct ice_rule_query_data added_entry = { 0 }; 5798 struct ice_adv_fltr_mgmt_list_entry *adv_fltr; 5799 int status = 0; 5800 5801 if (list_empty(list_head)) 5802 return status; 5803 list_for_each_entry(adv_fltr, list_head, list_entry) { 5804 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info; 5805 u16 lk_cnt = adv_fltr->lkups_cnt; 5806 5807 if (vsi_handle != rinfo->sw_act.vsi_handle) 5808 continue; 5809 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo, 5810 &added_entry); 5811 if (status) 5812 break; 5813 } 5814 return status; 5815 } 5816 5817 /** 5818 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists 5819 * @hw: pointer to the hardware structure 5820 * @vsi_handle: driver VSI handle 5821 * 5822 * Replays filters for requested VSI via vsi_handle. 5823 */ 5824 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) 5825 { 5826 struct ice_switch_info *sw = hw->switch_info; 5827 int status; 5828 u8 i; 5829 5830 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { 5831 struct list_head *head; 5832 5833 head = &sw->recp_list[i].filt_replay_rules; 5834 if (!sw->recp_list[i].adv_rule) 5835 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head); 5836 else 5837 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head); 5838 if (status) 5839 return status; 5840 } 5841 return status; 5842 } 5843 5844 /** 5845 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules 5846 * @hw: pointer to the HW struct 5847 * 5848 * Deletes the filter replay rules. 
5849 */ 5850 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw) 5851 { 5852 struct ice_switch_info *sw = hw->switch_info; 5853 u8 i; 5854 5855 if (!sw) 5856 return; 5857 5858 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { 5859 if (!list_empty(&sw->recp_list[i].filt_replay_rules)) { 5860 struct list_head *l_head; 5861 5862 l_head = &sw->recp_list[i].filt_replay_rules; 5863 if (!sw->recp_list[i].adv_rule) 5864 ice_rem_sw_rule_info(hw, l_head); 5865 else 5866 ice_rem_adv_rule_info(hw, l_head); 5867 } 5868 } 5869 } 5870