// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/bitfield.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"

#define NPC_BYTESM	GENMASK_ULL(19, 16)
#define NPC_HDR_OFFSET	GENMASK_ULL(15, 8)
#define NPC_KEY_OFFSET	GENMASK_ULL(5, 0)
#define NPC_LDATA_EN	BIT_ULL(7)

static const char * const npc_flow_names[] = {
	[NPC_DMAC]	= "dmac",
	[NPC_SMAC]	= "smac",
	[NPC_ETYPE]	= "ether type",
	[NPC_OUTER_VID]	= "outer vlan id",
	[NPC_TOS]	= "tos",
	[NPC_SIP_IPV4]	= "ipv4 source ip",
	[NPC_DIP_IPV4]	= "ipv4 destination ip",
	[NPC_SIP_IPV6]	= "ipv6 source ip",
	[NPC_DIP_IPV6]	= "ipv6 destination ip",
	[NPC_IPPROTO_TCP] = "ip proto tcp",
	[NPC_IPPROTO_UDP] = "ip proto udp",
	[NPC_IPPROTO_SCTP] = "ip proto sctp",
	[NPC_IPPROTO_AH] = "ip proto AH",
	[NPC_IPPROTO_ESP] = "ip proto ESP",
	[NPC_SPORT_TCP]	= "tcp source port",
	[NPC_DPORT_TCP]	= "tcp destination port",
	[NPC_SPORT_UDP]	= "udp source port",
	[NPC_DPORT_UDP]	= "udp destination port",
	[NPC_SPORT_SCTP] = "sctp source port",
	[NPC_DPORT_SCTP] = "sctp destination port",
	[NPC_UNKNOWN]	= "unknown",
};

const char *npc_get_field_name(u8 hdr)
{
	if (hdr >= ARRAY_SIZE(npc_flow_names))
		return npc_flow_names[NPC_UNKNOWN];

	return npc_flow_names[hdr];
}

/* Compute keyword masks and figure out the number of keywords a field
 * spans in the key.
 */
static void npc_set_kw_masks(struct npc_mcam *mcam, u8 type,
			     u8 nr_bits, int start_kwi, int offset, u8 intf)
{
	struct npc_key_field *field = &mcam->rx_key_fields[type];
	u8 bits_in_kw;
	int max_kwi;

	if (mcam->banks_per_entry == 1)
		max_kwi = 1; /* NPC_MCAM_KEY_X1 */
	else if (mcam->banks_per_entry == 2)
		max_kwi = 3; /* NPC_MCAM_KEY_X2 */
	else
		max_kwi = 6; /* NPC_MCAM_KEY_X4 */

	if (is_npc_intf_tx(intf))
		field = &mcam->tx_key_fields[type];

	if (offset + nr_bits <= 64) {
		/* one KW only */
		if (start_kwi > max_kwi)
			return;
		field->kw_mask[start_kwi] |= GENMASK_ULL(nr_bits - 1, 0)
					     << offset;
		field->nr_kws = 1;
	} else if (offset + nr_bits > 64 &&
		   offset + nr_bits <= 128) {
		/* two KWs */
		if (start_kwi + 1 > max_kwi)
			return;
		/* first KW mask */
		bits_in_kw = 64 - offset;
		field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
					     << offset;
		/* second KW mask i.e. mask for rest of bits */
		bits_in_kw = nr_bits + offset - 64;
		field->kw_mask[start_kwi + 1] |= GENMASK_ULL(bits_in_kw - 1, 0);
		field->nr_kws = 2;
	} else {
		/* three KWs */
		if (start_kwi + 2 > max_kwi)
			return;
		/* first KW mask */
		bits_in_kw = 64 - offset;
		field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
					     << offset;
		/* second KW mask */
		field->kw_mask[start_kwi + 1] = ~0ULL;
		/* third KW mask i.e. mask for rest of bits */
		bits_in_kw = nr_bits + offset - 128;
		field->kw_mask[start_kwi + 2] |= GENMASK_ULL(bits_in_kw - 1, 0);
		field->nr_kws = 3;
	}
}
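
/* Worked example (illustrative): a 16-bit field whose key byte offset
 * is 7 yields start_kwi = 7 / 8 = 0 and offset = (7 * 8) % 64 = 56 in
 * the callers of npc_set_kw_masks(). offset + nr_bits = 72 > 64, so
 * the field spans two keywords: bits 56..63 of KW0 (bits_in_kw = 8)
 * and bits 0..7 of KW1.
 */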

/* Helper function to figure out whether field exists in the key */
static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct npc_key_field *input;

	input = &mcam->rx_key_fields[type];
	if (is_npc_intf_tx(intf))
		input = &mcam->tx_key_fields[type];

	return input->nr_kws > 0;
}

static bool npc_is_same(struct npc_key_field *input,
			struct npc_key_field *field)
{
	int ret;

	ret = memcmp(&input->layer_mdata, &field->layer_mdata,
		     sizeof(struct npc_layer_mdata));
	return ret == 0;
}

static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type,
				u64 cfg, u8 lid, u8 lt, u8 intf)
{
	struct npc_key_field *input = &mcam->rx_key_fields[type];

	if (is_npc_intf_tx(intf))
		input = &mcam->tx_key_fields[type];

	input->layer_mdata.hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
	input->layer_mdata.key = FIELD_GET(NPC_KEY_OFFSET, cfg);
	input->layer_mdata.len = FIELD_GET(NPC_BYTESM, cfg) + 1;
	input->layer_mdata.ltype = lt;
	input->layer_mdata.lid = lid;
}

static bool npc_check_overlap_fields(struct npc_key_field *input1,
				     struct npc_key_field *input2)
{
	int kwi;

	/* Fields with the same layer id but different ltypes are mutually
	 * exclusive, hence they can safely overlap in the key.
	 */
	if (input1->layer_mdata.lid == input2->layer_mdata.lid &&
	    input1->layer_mdata.ltype != input2->layer_mdata.ltype)
		return false;

	for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) {
		if (input1->kw_mask[kwi] & input2->kw_mask[kwi])
			return true;
	}

	return false;
}

/* Helper function to check whether a given field overlaps with any other
 * field in the key. Due to limitations on key size and the key extraction
 * profile in use, higher layers can overwrite a lower layer's header
 * fields, hence overlap needs to be checked.
 */
static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
			      enum key_fields type, u8 start_lid, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct npc_key_field *dummy, *input;
	int start_kwi, offset;
	u8 nr_bits, lid, lt, ld;
	u64 cfg;

	dummy = &mcam->rx_key_fields[NPC_UNKNOWN];
	input = &mcam->rx_key_fields[type];

	if (is_npc_intf_tx(intf)) {
		dummy = &mcam->tx_key_fields[NPC_UNKNOWN];
		input = &mcam->tx_key_fields[type];
	}

	for (lid = start_lid; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				cfg = rvu_read64(rvu, blkaddr,
						 NPC_AF_INTFX_LIDX_LTX_LDX_CFG
						 (intf, lid, lt, ld));
				if (!FIELD_GET(NPC_LDATA_EN, cfg))
					continue;
				memset(dummy, 0, sizeof(struct npc_key_field));
				npc_set_layer_mdata(mcam, NPC_UNKNOWN, cfg,
						    lid, lt, intf);
				/* exclude input */
				if (npc_is_same(input, dummy))
					continue;
				start_kwi = dummy->layer_mdata.key / 8;
				offset = (dummy->layer_mdata.key * 8) % 64;
				nr_bits = dummy->layer_mdata.len * 8;
				/* form KW masks */
				npc_set_kw_masks(mcam, NPC_UNKNOWN, nr_bits,
						 start_kwi, offset, intf);
				/* check if any input field bits fall in any
				 * other field's bits
				 */
				if (npc_check_overlap_fields(dummy, input))
					return true;
			}
		}
	}

	return false;
}

static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
			    u8 intf)
{
	if (!npc_is_field_present(rvu, type, intf) ||
	    npc_check_overlap(rvu, blkaddr, type, 0, intf))
		return false;
	return true;
}
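
/* Parse-result nibble placement (illustrative example): each enabled
 * nibble takes the next 4 bits of the key, in bit order. E.g. if
 * nibbles 0-6 (channel, errlev, errcode, LXMB) and nibble 9 (LA ltype)
 * are enabled, LA ltype is the 8th enabled nibble, so key_nibble = 7
 * and npc_scan_parse_result() below records kwi = 0, offset = 28.
 */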

static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
				  u8 key_nibble, u8 intf)
{
	u8 offset = (key_nibble * 4) % 64; /* offset within key word */
	u8 kwi = (key_nibble * 4) / 64; /* which word in key */
	u8 nr_bits = 4; /* bits in a nibble */
	u8 type;

	switch (bit_number) {
	case 0 ... 2:
		type = NPC_CHAN;
		break;
	case 3:
		type = NPC_ERRLEV;
		break;
	case 4 ... 5:
		type = NPC_ERRCODE;
		break;
	case 6:
		type = NPC_LXMB;
		break;
	/* check for LTYPE only as of now */
	case 9:
		type = NPC_LA;
		break;
	case 12:
		type = NPC_LB;
		break;
	case 15:
		type = NPC_LC;
		break;
	case 18:
		type = NPC_LD;
		break;
	case 21:
		type = NPC_LE;
		break;
	case 24:
		type = NPC_LF;
		break;
	case 27:
		type = NPC_LG;
		break;
	case 30:
		type = NPC_LH;
		break;
	default:
		return;
	}
	npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
}

static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct npc_key_field *key_fields;
	/* Ether type can come from three layers
	 * (ethernet, single tagged, double tagged)
	 */
	struct npc_key_field *etype_ether;
	struct npc_key_field *etype_tag1;
	struct npc_key_field *etype_tag2;
	/* Outer VLAN TCI can come from two layers
	 * (single tagged, double tagged)
	 */
	struct npc_key_field *vlan_tag1;
	struct npc_key_field *vlan_tag2;
	u64 *features;
	u8 start_lid;
	int i;

	key_fields = mcam->rx_key_fields;
	features = &mcam->rx_features;

	if (is_npc_intf_tx(intf)) {
		key_fields = mcam->tx_key_fields;
		features = &mcam->tx_features;
	}

	/* Handle header fields which can come from multiple layers like
	 * etype and outer vlan tci. These fields should have the same
	 * position in the key; otherwise more than one MCAM entry is needed
	 * to install a rule, which complicates MCAM space management.
	 */
	etype_ether = &key_fields[NPC_ETYPE_ETHER];
	etype_tag1 = &key_fields[NPC_ETYPE_TAG1];
	etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
	vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
	vlan_tag2 = &key_fields[NPC_VLAN_TAG2];

	/* if key profile programmed does not extract Ethertype at all */
	if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
		goto vlan_tci;

	/* if key profile programmed extracts Ethertype from one layer */
	if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
		key_fields[NPC_ETYPE] = *etype_ether;
	if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws)
		key_fields[NPC_ETYPE] = *etype_tag1;
	if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws)
		key_fields[NPC_ETYPE] = *etype_tag2;

	/* if key profile programmed extracts Ethertype from multiple layers */
	if (etype_ether->nr_kws && etype_tag1->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i])
				goto vlan_tci;
		}
		key_fields[NPC_ETYPE] = *etype_tag1;
	}
	if (etype_ether->nr_kws && etype_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i])
				goto vlan_tci;
		}
		key_fields[NPC_ETYPE] = *etype_tag2;
	}
	if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i])
				goto vlan_tci;
		}
		key_fields[NPC_ETYPE] = *etype_tag2;
	}

	/* check that none of the higher layers overwrite Ethertype */
	start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
	if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf))
		goto vlan_tci;
	*features |= BIT_ULL(NPC_ETYPE);
vlan_tci:
	/* if key profile does not extract outer vlan tci at all */
	if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
		goto done;

	/* if key profile extracts outer vlan tci from one layer */
	if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
		key_fields[NPC_OUTER_VID] = *vlan_tag1;
	if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws)
		key_fields[NPC_OUTER_VID] = *vlan_tag2;

	/* if key profile extracts outer vlan tci from multiple layers */
	if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i])
				goto done;
		}
		key_fields[NPC_OUTER_VID] = *vlan_tag2;
	}
	/* check that none of the higher layers overwrite outer vlan tci */
	start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
	if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf))
		goto done;
	*features |= BIT_ULL(NPC_OUTER_VID);
done:
	return;
}
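
/* Example outcome of the collapsing above (illustrative): if the
 * profile extracts the Ethertype of untagged and single tagged packets
 * into the same key bytes, the kw_masks match and one NPC_ETYPE
 * feature covers both; if they land at different key offsets the
 * feature stays unset, as a single MCAM entry could not match both.
 */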

static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
			   u8 lt, u64 cfg, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u8 hdr, key, nr_bytes, bit_offset;
	u8 la_ltype, la_start;
	/* starting KW index and starting bit position */
	int start_kwi, offset;

	nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
	hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
	key = FIELD_GET(NPC_KEY_OFFSET, cfg);
	start_kwi = key / 8;
	offset = (key * 8) % 64;

	/* For Tx, Layer A has the NIX_INST_HDR_S (64 bits, i.e. 8 bytes)
	 * preceding the ethernet header.
	 */
	if (is_npc_intf_tx(intf)) {
		la_ltype = NPC_LT_LA_IH_NIX_ETHER;
		la_start = 8;
	} else {
		la_ltype = NPC_LT_LA_ETHER;
		la_start = 0;
	}

#define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen)			       \
do {									       \
	if (lid == (hlid) && lt == (hlt)) {				       \
		if ((hstart) >= hdr &&					       \
		    ((hstart) + (hlen)) <= (hdr + nr_bytes)) {		       \
			bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
			npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \
			npc_set_kw_masks(mcam, (name), (hlen) * 8,	       \
					 start_kwi, offset + bit_offset, intf);\
		}							       \
	}								       \
} while (0)

	/* List LID, LTYPE, start offset from layer and length (in bytes) of
	 * packet header fields below.
	 * Example: Source IP is 4 bytes and starts at 12th byte of IP header
	 */
	NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
	NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
	NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
	NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
	NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
	NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
	NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
	NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
	NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2);
	NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
	NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
	NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
	NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
	NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
	NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
	NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
	NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6);
	/* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
	NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
}
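
/* NPC_SCAN_HDR() arithmetic, worked example (illustrative): if LD
 * extracts 4 bytes of the TCP header from byte 0 (hdr = 0,
 * nr_bytes = 4), then for the destination port (hstart = 2, hlen = 2)
 * bit_offset = (0 + 4 - 2 - 2) * 8 = 0, while the source port
 * (hstart = 0) gets bit_offset = 16, i.e. earlier header bytes sit at
 * higher bit offsets within the extracted window.
 */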

static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u64 *features = &mcam->rx_features;
	u64 tcp_udp_sctp;
	int hdr;

	if (is_npc_intf_tx(intf))
		features = &mcam->tx_features;

	for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
		if (npc_check_field(rvu, blkaddr, hdr, intf))
			*features |= BIT_ULL(hdr);
	}

	tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
		       BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
		       BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);

	/* for tcp/udp/sctp the corresponding layer type should be in the key */
	if (*features & tcp_udp_sctp) {
		if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
			*features &= ~tcp_udp_sctp;
		else
			*features |= BIT_ULL(NPC_IPPROTO_TCP) |
				     BIT_ULL(NPC_IPPROTO_UDP) |
				     BIT_ULL(NPC_IPPROTO_SCTP);
	}

	/* for AH, check if the corresponding layer type is present in the key */
	if (npc_check_field(rvu, blkaddr, NPC_LD, intf))
		*features |= BIT_ULL(NPC_IPPROTO_AH);

	/* for ESP, check if the corresponding layer type is present in the key */
	if (npc_check_field(rvu, blkaddr, NPC_LE, intf))
		*features |= BIT_ULL(NPC_IPPROTO_ESP);

	/* for vlan the corresponding layer type should be in the key */
	if (*features & BIT_ULL(NPC_OUTER_VID))
		if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
			*features &= ~BIT_ULL(NPC_OUTER_VID);
}

/* Scan the key extraction profile and record how fields of our interest
 * fill the key structure. Also verify that Channel and DMAC exist in
 * the key and are not overwritten by other header fields.
 */
static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u8 lid, lt, ld, bitnr;
	u8 key_nibble = 0;
	u64 cfg;

	/* Scan and note how the parse result is going to be in the key.
	 * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from the
	 * parse result in the key. The enabled nibbles from the parse
	 * result will be concatenated in the key.
	 */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
	cfg &= NPC_PARSE_NIBBLE;
	for_each_set_bit(bitnr, (unsigned long *)&cfg, 31) {
		npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
		key_nibble++;
	}

	/* Scan and note how layer data is going to be in the key */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				cfg = rvu_read64(rvu, blkaddr,
						 NPC_AF_INTFX_LIDX_LTX_LDX_CFG
						 (intf, lid, lt, ld));
				if (!FIELD_GET(NPC_LDATA_EN, cfg))
					continue;
				npc_scan_ldata(rvu, blkaddr, lid, lt, cfg,
					       intf);
			}
		}
	}

	return 0;
}

static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
{
	int err;

	err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX);
	if (err)
		return err;

	err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX);
	if (err)
		return err;

	/* Channel is mandatory */
	if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) {
		dev_err(rvu->dev, "Channel not present in Key\n");
		return -EINVAL;
	}
	/* check that none of the fields overwrite channel */
	if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) {
		dev_err(rvu->dev, "Channel cannot be overwritten\n");
		return -EINVAL;
	}
	/* DMAC should be present in key for unicast filter to work */
	if (!npc_is_field_present(rvu, NPC_DMAC, NIX_INTF_RX)) {
		dev_err(rvu->dev, "DMAC not present in Key\n");
		return -EINVAL;
	}
	/* check that none of the fields overwrite DMAC */
	if (npc_check_overlap(rvu, blkaddr, NPC_DMAC, 0, NIX_INTF_RX)) {
		dev_err(rvu->dev, "DMAC cannot be overwritten\n");
		return -EINVAL;
	}

	npc_set_features(rvu, blkaddr, NIX_INTF_TX);
	npc_set_features(rvu, blkaddr, NIX_INTF_RX);
	npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_TX);
	npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_RX);

	return 0;
}

int npc_flow_steering_init(struct rvu *rvu, int blkaddr)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;

	INIT_LIST_HEAD(&mcam->mcam_rules);

	return npc_scan_verify_kex(rvu, blkaddr);
}

static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u64 *mcam_features = &mcam->rx_features;
	u64 unsupported;
	u8 bit;

	if (is_npc_intf_tx(intf))
		mcam_features = &mcam->tx_features;

	unsupported = (*mcam_features ^ features) & ~(*mcam_features);
	if (unsupported) {
		dev_info(rvu->dev, "Unsupported flow(s):\n");
		for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
			dev_info(rvu->dev, "%s ", npc_get_field_name(bit));
		return -EOPNOTSUPP;
	}

	return 0;
}
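
/* Illustration of the check above: with *mcam_features = 0b0110 and
 * requested features = 0b1100, unsupported = 0b1010 & ~0b0110 = 0b1000,
 * i.e. exactly the bits the flow asks for but the KEX profile lacks.
 */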

/* npc_update_entry - Based on the masks generated during
 * the key scanning, updates the given entry with value and
 * masks for the field of interest. Maximum 16 bytes of a packet
 * header can be extracted by HW, hence lo and hi are sufficient.
 * When field bytes are less than or equal to 8 then hi should be
 * 0 for value and mask.
 *
 * If an exact match of value is required then mask should be all 1's.
 * If any bits in mask are 0 then the corresponding bits in value are
 * don't care.
 */
static void npc_update_entry(struct rvu *rvu, enum key_fields type,
			     struct mcam_entry *entry, u64 val_lo,
			     u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct mcam_entry dummy = { {0} };
	struct npc_key_field *field;
	u64 kw1, kw2, kw3;
	u8 shift;
	int i;

	field = &mcam->rx_key_fields[type];
	if (is_npc_intf_tx(intf))
		field = &mcam->tx_key_fields[type];

	if (!field->nr_kws)
		return;

	for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
		if (!field->kw_mask[i])
			continue;
		/* place key value in kw[x] */
		shift = __ffs64(field->kw_mask[i]);
		/* update entry value */
		kw1 = (val_lo << shift) & field->kw_mask[i];
		dummy.kw[i] = kw1;
		/* update entry mask */
		kw1 = (mask_lo << shift) & field->kw_mask[i];
		dummy.kw_mask[i] = kw1;

		if (field->nr_kws == 1)
			break;
		/* place remaining bits of key value in kw[x + 1] */
		if (field->nr_kws == 2) {
			/* update entry value */
			kw2 = shift ? val_lo >> (64 - shift) : 0;
			kw2 |= (val_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			dummy.kw[i + 1] = kw2;
			/* update entry mask */
			kw2 = shift ? mask_lo >> (64 - shift) : 0;
			kw2 |= (mask_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			dummy.kw_mask[i + 1] = kw2;
			break;
		}
		/* place remaining bits of key value in kw[x + 1], kw[x + 2] */
		if (field->nr_kws == 3) {
			/* update entry value */
			kw2 = shift ? val_lo >> (64 - shift) : 0;
			kw2 |= (val_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			kw3 = shift ? val_hi >> (64 - shift) : 0;
			kw3 &= field->kw_mask[i + 2];
			dummy.kw[i + 1] = kw2;
			dummy.kw[i + 2] = kw3;
			/* update entry mask */
			kw2 = shift ? mask_lo >> (64 - shift) : 0;
			kw2 |= (mask_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			kw3 = shift ? mask_hi >> (64 - shift) : 0;
			kw3 &= field->kw_mask[i + 2];
			dummy.kw_mask[i + 1] = kw2;
			dummy.kw_mask[i + 2] = kw3;
			break;
		}
	}
	/* dummy is now ready with values and masks for the given key
	 * field; clear and update the input entry with those
	 */
	for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
		if (!field->kw_mask[i])
			continue;
		entry->kw[i] &= ~field->kw_mask[i];
		entry->kw_mask[i] &= ~field->kw_mask[i];

		entry->kw[i] |= dummy.kw[i];
		entry->kw_mask[i] |= dummy.kw_mask[i];
	}
}
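
/* Worked example (illustrative) for npc_update_entry() above: if
 * NPC_ETYPE occupies bits 48..63 of KW0 (kw_mask[0] =
 * 0xffff000000000000), then shift = 48 and val_lo = 0x0800 is placed
 * as 0x0800000000000000 in kw[0]. Had the field straddled KW0/KW1,
 * val_lo's upper bits would spill into kw[1] via
 * 'val_lo >> (64 - shift)'.
 */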

#define IPV6_WORDS	4

static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry,
				 u64 features, struct flow_msg *pkt,
				 struct flow_msg *mask,
				 struct rvu_npc_mcam_rule *output, u8 intf)
{
	u32 src_ip[IPV6_WORDS], src_ip_mask[IPV6_WORDS];
	u32 dst_ip[IPV6_WORDS], dst_ip_mask[IPV6_WORDS];
	struct flow_msg *opkt = &output->packet;
	struct flow_msg *omask = &output->mask;
	u64 mask_lo, mask_hi;
	u64 val_lo, val_hi;

	/* For an ipv6 address fe80::2c68:63ff:fe5e:2d0a the packet
	 * values to be programmed in MCAM should be as below:
	 * val_high: 0xfe80000000000000
	 * val_low: 0x2c6863fffe5e2d0a
	 */
	if (features & BIT_ULL(NPC_SIP_IPV6)) {
		be32_to_cpu_array(src_ip_mask, mask->ip6src, IPV6_WORDS);
		be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);

		mask_hi = (u64)src_ip_mask[0] << 32 | src_ip_mask[1];
		mask_lo = (u64)src_ip_mask[2] << 32 | src_ip_mask[3];
		val_hi = (u64)src_ip[0] << 32 | src_ip[1];
		val_lo = (u64)src_ip[2] << 32 | src_ip[3];

		npc_update_entry(rvu, NPC_SIP_IPV6, entry, val_lo, val_hi,
				 mask_lo, mask_hi, intf);
		memcpy(opkt->ip6src, pkt->ip6src, sizeof(opkt->ip6src));
		memcpy(omask->ip6src, mask->ip6src, sizeof(omask->ip6src));
	}
	if (features & BIT_ULL(NPC_DIP_IPV6)) {
		be32_to_cpu_array(dst_ip_mask, mask->ip6dst, IPV6_WORDS);
		be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);

		mask_hi = (u64)dst_ip_mask[0] << 32 | dst_ip_mask[1];
		mask_lo = (u64)dst_ip_mask[2] << 32 | dst_ip_mask[3];
		val_hi = (u64)dst_ip[0] << 32 | dst_ip[1];
		val_lo = (u64)dst_ip[2] << 32 | dst_ip[3];

		npc_update_entry(rvu, NPC_DIP_IPV6, entry, val_lo, val_hi,
				 mask_lo, mask_hi, intf);
		memcpy(opkt->ip6dst, pkt->ip6dst, sizeof(opkt->ip6dst));
		memcpy(omask->ip6dst, mask->ip6dst, sizeof(omask->ip6dst));
	}
}

static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
			    u64 features, struct flow_msg *pkt,
			    struct flow_msg *mask,
			    struct rvu_npc_mcam_rule *output, u8 intf)
{
	u64 dmac_mask = ether_addr_to_u64(mask->dmac);
	u64 smac_mask = ether_addr_to_u64(mask->smac);
	u64 dmac_val = ether_addr_to_u64(pkt->dmac);
	u64 smac_val = ether_addr_to_u64(pkt->smac);
	struct flow_msg *opkt = &output->packet;
	struct flow_msg *omask = &output->mask;

	if (!features)
		return;

	/* For tcp/udp/sctp LTYPE should be present in entry */
	if (features & BIT_ULL(NPC_IPPROTO_TCP))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
				 0, ~0ULL, 0, intf);
	if (features & BIT_ULL(NPC_IPPROTO_UDP))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
				 0, ~0ULL, 0, intf);
	if (features & BIT_ULL(NPC_IPPROTO_SCTP))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
				 0, ~0ULL, 0, intf);

	/* For the outer VLAN, match either CTAG or STAG_QINQ ltype: the
	 * value is the OR of both ltypes and the mask keeps only the bits
	 * common to both, so either ltype satisfies the TCAM match.
	 */
	if (features & BIT_ULL(NPC_OUTER_VID))
		npc_update_entry(rvu, NPC_LB, entry,
				 NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
				 NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);

	/* For AH, LTYPE should be present in entry */
	if (features & BIT_ULL(NPC_IPPROTO_AH))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH,
				 0, ~0ULL, 0, intf);
	/* For ESP, LTYPE should be present in entry */
	if (features & BIT_ULL(NPC_IPPROTO_ESP))
		npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
				 0, ~0ULL, 0, intf);

#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi)	      \
do {									      \
	if (features & BIT_ULL((field))) {				      \
		npc_update_entry(rvu, (field), entry, (val_lo), (val_hi),     \
				 (mask_lo), (mask_hi), intf);		      \
		memcpy(&opkt->member, &pkt->member, sizeof(pkt->member));     \
		memcpy(&omask->member, &mask->member, sizeof(mask->member));  \
	}								      \
} while (0)

	NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);
	NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
	NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
		       ntohs(mask->etype), 0);
	NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
		       ntohl(mask->ip4src), 0);
	NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
		       ntohl(mask->ip4dst), 0);
	NPC_WRITE_FLOW(NPC_SPORT_TCP, sport, ntohs(pkt->sport), 0,
		       ntohs(mask->sport), 0);
	NPC_WRITE_FLOW(NPC_SPORT_UDP, sport, ntohs(pkt->sport), 0,
		       ntohs(mask->sport), 0);
	NPC_WRITE_FLOW(NPC_DPORT_TCP, dport, ntohs(pkt->dport), 0,
		       ntohs(mask->dport), 0);
	NPC_WRITE_FLOW(NPC_DPORT_UDP, dport, ntohs(pkt->dport), 0,
		       ntohs(mask->dport), 0);
	NPC_WRITE_FLOW(NPC_SPORT_SCTP, sport, ntohs(pkt->sport), 0,
		       ntohs(mask->sport), 0);
	NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
		       ntohs(mask->dport), 0);

	NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
		       ntohs(mask->vlan_tci), 0);

	npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
}

static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam,
						    u16 entry)
{
	struct rvu_npc_mcam_rule *iter;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		if (iter->entry == entry) {
			mutex_unlock(&mcam->lock);
			return iter;
		}
	}
	mutex_unlock(&mcam->lock);

	return NULL;
}

static void rvu_mcam_add_rule(struct npc_mcam *mcam,
			      struct rvu_npc_mcam_rule *rule)
{
	struct list_head *head = &mcam->mcam_rules;
	struct rvu_npc_mcam_rule *iter;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		if (iter->entry > rule->entry)
			break;
		head = &iter->list;
	}

	list_add(&rule->list, head);
	mutex_unlock(&mcam->lock);
}

static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
					      struct rvu_npc_mcam_rule *rule)
{
	struct npc_mcam_oper_counter_req free_req = { 0 };
	struct msg_rsp free_rsp;

	if (!rule->has_cntr)
		return;

	free_req.hdr.pcifunc = pcifunc;
	free_req.cntr = rule->cntr;

	rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp);
	rule->has_cntr = false;
}

static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
					 struct rvu_npc_mcam_rule *rule,
					 struct npc_install_flow_rsp *rsp)
{
	struct npc_mcam_alloc_counter_req cntr_req = { 0 };
	struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
	int err;

	cntr_req.hdr.pcifunc = pcifunc;
	cntr_req.contig = true;
	cntr_req.count = 1;

	/* we try to allocate a counter to track the stats of this
	 * rule. If the counter could not be allocated then proceed
	 * without one, because counters are scarcer than entries.
	 */
	err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req,
						      &cntr_rsp);
	if (!err && cntr_rsp.count) {
		rule->cntr = cntr_rsp.cntr;
		rule->has_cntr = true;
		rsp->counter = rule->cntr;
	} else {
		rsp->counter = err;
	}
}
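
/* Note on the allocation above: on failure rsp->counter carries the
 * (negative) mbox error code instead of a counter index, and
 * rule->has_cntr stays false so the rule proceeds without stats.
 */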

static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct mcam_entry *entry,
				struct npc_install_flow_req *req, u16 target)
{
	struct nix_rx_action action;

	npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0,
			 ~0ULL, 0, NIX_INTF_RX);

	*(u64 *)&action = 0x00;
	action.pf_func = target;
	action.op = req->op;
	action.index = req->index;
	action.match_id = req->match_id;
	action.flow_key_alg = req->flow_key_alg;

	if (req->op == NIX_RX_ACTION_DEFAULT && pfvf->def_ucast_rule)
		action = pfvf->def_ucast_rule->rx_action;

	entry->action = *(u64 *)&action;

	/* VTAG0 starts at 0th byte of LID_B.
	 * VTAG1 starts at 4th byte of LID_B.
	 */
	entry->vtag_action = FIELD_PREP(RX_VTAG0_VALID_BIT, req->vtag0_valid) |
			     FIELD_PREP(RX_VTAG0_TYPE_MASK, req->vtag0_type) |
			     FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) |
			     FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0) |
			     FIELD_PREP(RX_VTAG1_VALID_BIT, req->vtag1_valid) |
			     FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
			     FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
			     FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
}

static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct mcam_entry *entry,
				struct npc_install_flow_req *req, u16 target)
{
	struct nix_tx_action action;

	npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
			 0, ~0ULL, 0, NIX_INTF_TX);

	*(u64 *)&action = 0x00;
	action.op = req->op;
	action.index = req->index;
	action.match_id = req->match_id;

	entry->action = *(u64 *)&action;

	/* VTAG0 starts at 20th byte of LID_A.
	 * VTAG1 starts at 24th byte of LID_A.
	 */
	entry->vtag_action = FIELD_PREP(TX_VTAG0_DEF_MASK, req->vtag0_def) |
			     FIELD_PREP(TX_VTAG0_OP_MASK, req->vtag0_op) |
			     FIELD_PREP(TX_VTAG0_LID_MASK, NPC_LID_LA) |
			     FIELD_PREP(TX_VTAG0_RELPTR_MASK, 20) |
			     FIELD_PREP(TX_VTAG1_DEF_MASK, req->vtag1_def) |
			     FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
			     FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
			     FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
}
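
/* Why relptr 20/24 on TX (illustrative): TX layer A begins with the
 * 8-byte NIX_INST_HDR_S, so byte 20 = 8 + 12 is just past DMAC+SMAC,
 * the insertion point for VTAG0; VTAG1 sits one 4-byte tag further at
 * byte 24.
 */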

static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
			    int nixlf, struct rvu_pfvf *pfvf,
			    struct npc_install_flow_req *req,
			    struct npc_install_flow_rsp *rsp, bool enable,
			    bool pf_set_vfs_mac)
{
	struct rvu_npc_mcam_rule *def_ucast_rule = pfvf->def_ucast_rule;
	u64 features, installed_features, missing_features = 0;
	struct npc_mcam_write_entry_req write_req = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule dummy = { 0 };
	struct rvu_npc_mcam_rule *rule;
	bool new = false, msg_from_vf;
	u16 owner = req->hdr.pcifunc;
	struct msg_rsp write_rsp;
	struct mcam_entry *entry;
	int entry_index, err;

	msg_from_vf = !!(owner & RVU_PFVF_FUNC_MASK);

	installed_features = req->features;
	features = req->features;
	entry = &write_req.entry_data;
	entry_index = req->entry;

	npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
			req->intf);

	if (is_npc_intf_rx(req->intf))
		npc_update_rx_entry(rvu, pfvf, entry, req, target);
	else
		npc_update_tx_entry(rvu, pfvf, entry, req, target);

	/* Default unicast rules do not exist for TX */
	if (is_npc_intf_tx(req->intf))
		goto find_rule;

	if (def_ucast_rule)
		missing_features = (def_ucast_rule->features ^ features) &
					def_ucast_rule->features;

	if (req->default_rule && req->append) {
		/* add to default rule */
		if (missing_features)
			npc_update_flow(rvu, entry, missing_features,
					&def_ucast_rule->packet,
					&def_ucast_rule->mask,
					&dummy, req->intf);
		enable = rvu_npc_write_default_rule(rvu, blkaddr,
						    nixlf, target,
						    pfvf->nix_rx_intf, entry,
						    &entry_index);
		installed_features = req->features | missing_features;
	} else if (req->default_rule && !req->append) {
		/* overwrite default rule */
		enable = rvu_npc_write_default_rule(rvu, blkaddr,
						    nixlf, target,
						    pfvf->nix_rx_intf, entry,
						    &entry_index);
	} else if (msg_from_vf) {
		/* normal rule: for a VF, also fold the default rule's
		 * fields into it
		 */
		npc_update_flow(rvu, entry, missing_features,
				&def_ucast_rule->packet, &def_ucast_rule->mask,
				&dummy, req->intf);
		installed_features = req->features | missing_features;
	}

find_rule:
	rule = rvu_mcam_find_rule(mcam, entry_index);
	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;
		new = true;
	}
	/* no counter for default rule */
	if (req->default_rule)
		goto update_rule;

	/* allocate a new counter if the rule has no counter */
	if (req->set_cntr && !rule->has_cntr)
		rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp);

	/* if the user wants to delete an existing counter for a rule then
	 * free the counter
	 */
	if (!req->set_cntr && rule->has_cntr)
		rvu_mcam_remove_counter_from_rule(rvu, owner, rule);

	write_req.hdr.pcifunc = owner;
	write_req.entry = req->entry;
	write_req.intf = req->intf;
	write_req.enable_entry = (u8)enable;
	/* if a counter is available then clear and use it */
	if (req->set_cntr && rule->has_cntr) {
		rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00);
		write_req.set_cntr = 1;
		write_req.cntr = rule->cntr;
	}

	err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
						    &write_rsp);
	if (err) {
		rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
		if (new)
			kfree(rule);
		return err;
	}
update_rule:
	memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
	memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
	rule->entry = entry_index;
	memcpy(&rule->rx_action, &entry->action, sizeof(struct nix_rx_action));
	if (is_npc_intf_tx(req->intf))
		memcpy(&rule->tx_action, &entry->action,
		       sizeof(struct nix_tx_action));
	rule->vtag_action = entry->vtag_action;
	rule->features = installed_features;
	rule->default_rule = req->default_rule;
	rule->owner = owner;
	rule->enable = enable;
	if (is_npc_intf_tx(req->intf))
		rule->intf = pfvf->nix_tx_intf;
	else
		rule->intf = pfvf->nix_rx_intf;

	if (new)
		rvu_mcam_add_rule(mcam, rule);
	if (req->default_rule)
		pfvf->def_ucast_rule = rule;

	/* VF's MAC address is being changed via PF */
	if (pf_set_vfs_mac) {
		ether_addr_copy(pfvf->default_mac, req->packet.dmac);
		ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
	}

	if (pfvf->pf_set_vf_cfg && req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
		rule->vfvlan_cfg = true;

	return 0;
}
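
/* missing_features above (illustrative): the fields matched by the
 * default unicast rule but absent from the request. Folding them into
 * a VF's rule keeps the new entry at least as specific as the reserved
 * unicast entry, so it does not steal unrelated unicast traffic.
 */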

int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
				      struct npc_install_flow_req *req,
				      struct npc_install_flow_rsp *rsp)
{
	bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;
	bool pf_set_vfs_mac = false;
	bool enable = true;
	u16 target;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return -ENODEV;
	}

	if (!is_npc_interface_valid(rvu, req->intf))
		return -EINVAL;

	if (from_vf && req->default_rule)
		return NPC_MCAM_PERM_DENIED;

	/* Each PF/VF's info is maintained in struct rvu_pfvf. The rvu_pfvf
	 * of the target PF/VF needs to be retrieved, hence adjust pcifunc
	 * accordingly.
	 */

	/* AF installing for a PF/VF */
	if (!req->hdr.pcifunc)
		target = req->vf;
	/* PF installing for its VF */
	else if (!from_vf && req->vf) {
		target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
		pf_set_vfs_mac = req->default_rule &&
				 (req->features & BIT_ULL(NPC_DMAC));
	}
	/* msg received from PF/VF */
	else
		target = req->hdr.pcifunc;

	if (npc_check_unsupported_flows(rvu, req->features, req->intf))
		return -EOPNOTSUPP;

	if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, target);

	/* PF installing for its VF */
	if (req->hdr.pcifunc && !from_vf && req->vf)
		pfvf->pf_set_vf_cfg = 1;

	/* update req destination mac addr */
	if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) &&
	    is_zero_ether_addr(req->packet.dmac)) {
		ether_addr_copy(req->packet.dmac, pfvf->mac_addr);
		eth_broadcast_addr((u8 *)&req->mask.dmac);
	}

	err = nix_get_nixlf(rvu, target, &nixlf, NULL);

	/* If interface is uninitialized then do not enable entry */
	if (err || (!req->default_rule && !pfvf->def_ucast_rule))
		enable = false;

	/* Packets reaching NPC in Tx path implies that a
	 * NIXLF is properly setup and transmitting.
	 * Hence rules can be enabled for Tx.
	 */
	if (is_npc_intf_tx(req->intf))
		enable = true;

	/* Do not allow requests from uninitialized VFs */
	if (from_vf && !enable)
		return -EINVAL;

	/* If the message is from a VF then its flow should not overlap with
	 * the reserved unicast flow.
	 */
	if (from_vf && pfvf->def_ucast_rule && is_npc_intf_rx(req->intf) &&
	    pfvf->def_ucast_rule->features & req->features)
		return -EINVAL;

	return npc_install_flow(rvu, blkaddr, target, nixlf, pfvf, req, rsp,
				enable, pf_set_vfs_mac);
}
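
/* Target resolution above, summarized: the AF (pcifunc == 0) passes
 * the full target pcifunc in req->vf; a PF installing for one of its
 * VFs keeps its own PF bits and substitutes req->vf as the function
 * part; any other sender installs for itself.
 */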

static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
			   u16 pcifunc)
{
	struct npc_mcam_ena_dis_entry_req dis_req = { 0 };
	struct msg_rsp dis_rsp;

	if (rule->default_rule)
		return 0;

	if (rule->has_cntr)
		rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);

	dis_req.hdr.pcifunc = pcifunc;
	dis_req.entry = rule->entry;

	list_del(&rule->list);
	kfree(rule);

	return rvu_mbox_handler_npc_mcam_dis_entry(rvu, &dis_req, &dis_rsp);
}

int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
				     struct npc_delete_flow_req *req,
				     struct msg_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *iter, *tmp;
	u16 pcifunc = req->hdr.pcifunc;
	struct list_head del_list;

	INIT_LIST_HEAD(&del_list);

	mutex_lock(&mcam->lock);
	list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) {
		if (iter->owner == pcifunc) {
			/* All rules */
			if (req->all) {
				list_move_tail(&iter->list, &del_list);
			/* Range of rules */
			} else if (req->end && iter->entry >= req->start &&
				   iter->entry <= req->end) {
				list_move_tail(&iter->list, &del_list);
			/* single rule */
			} else if (req->entry == iter->entry) {
				list_move_tail(&iter->list, &del_list);
				break;
			}
		}
	}
	mutex_unlock(&mcam->lock);

	list_for_each_entry_safe(iter, tmp, &del_list, list) {
		u16 entry = iter->entry;

		/* clear the mcam entry target pcifunc */
		mcam->entry2target_pffunc[entry] = 0x0;
		if (npc_delete_flow(rvu, iter, pcifunc))
			dev_err(rvu->dev, "rule deletion failed for entry:%u",
				entry);
	}

	return 0;
}

static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr,
				 struct rvu_npc_mcam_rule *rule,
				 struct rvu_pfvf *pfvf)
{
	struct npc_mcam_write_entry_req write_req = { 0 };
	struct mcam_entry *entry = &write_req.entry_data;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct msg_rsp rsp;
	u8 intf, enable;
	int err;

	ether_addr_copy(rule->packet.dmac, pfvf->mac_addr);

	npc_read_mcam_entry(rvu, mcam, npcblkaddr, rule->entry,
			    entry, &intf, &enable);

	npc_update_entry(rvu, NPC_DMAC, entry,
			 ether_addr_to_u64(pfvf->mac_addr), 0,
			 0xffffffffffffull, 0, intf);

	write_req.hdr.pcifunc = rule->owner;
	write_req.entry = rule->entry;

	/* Drop mcam->lock across the mbox call since the write handler
	 * takes the lock itself.
	 */
	mutex_unlock(&mcam->lock);
	err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp);
	mutex_lock(&mcam->lock);

	return err;
}

void npc_mcam_enable_flows(struct rvu *rvu, u16 target)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, target);
	struct rvu_npc_mcam_rule *def_ucast_rule;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule;
	int blkaddr, bank, index;
	u64 def_action;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	def_ucast_rule = pfvf->def_ucast_rule;

	mutex_lock(&mcam->lock);
	list_for_each_entry(rule, &mcam->mcam_rules, list) {
		if (is_npc_intf_rx(rule->intf) &&
		    rule->rx_action.pf_func == target && !rule->enable) {
			if (rule->default_rule) {
				npc_enable_mcam_entry(rvu, mcam, blkaddr,
						      rule->entry, true);
				rule->enable = true;
				continue;
			}

			if (rule->vfvlan_cfg)
				npc_update_dmac_value(rvu, blkaddr, rule, pfvf);

			if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) {
				if (!def_ucast_rule)
					continue;
				/* Use default unicast entry action */
				rule->rx_action = def_ucast_rule->rx_action;
				def_action = *(u64 *)&def_ucast_rule->rx_action;
				bank = npc_get_bank(mcam, rule->entry);
				rvu_write64(rvu, blkaddr,
					    NPC_AF_MCAMEX_BANKX_ACTION
					    (rule->entry, bank), def_action);
			}

			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      rule->entry, true);
			rule->enable = true;
		}
	}

	/* Enable MCAM entries installed by PF with target as VF pcifunc */
	for (index = 0; index < mcam->bmap_entries; index++) {
		if (mcam->entry2target_pffunc[index] == target)
			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      index, true);
	}
	mutex_unlock(&mcam->lock);
}

void npc_mcam_disable_flows(struct rvu *rvu, u16 target)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	mutex_lock(&mcam->lock);
	/* Disable MCAM entries installed by PF with target as VF pcifunc */
	for (index = 0; index < mcam->bmap_entries; index++) {
		if (mcam->entry2target_pffunc[index] == target)
			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      index, false);
	}
	mutex_unlock(&mcam->lock);
}