1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Admin Function driver 3 * 4 * Copyright (C) 2020 Marvell. 5 */ 6 7 #include <linux/bitfield.h> 8 9 #include "rvu_struct.h" 10 #include "rvu_reg.h" 11 #include "rvu.h" 12 #include "npc.h" 13 #include "rvu_npc_fs.h" 14 #include "rvu_npc_hash.h" 15 16 #define NPC_BYTESM GENMASK_ULL(19, 16) 17 #define NPC_HDR_OFFSET GENMASK_ULL(15, 8) 18 #define NPC_KEY_OFFSET GENMASK_ULL(5, 0) 19 #define NPC_LDATA_EN BIT_ULL(7) 20 21 static const char * const npc_flow_names[] = { 22 [NPC_DMAC] = "dmac", 23 [NPC_SMAC] = "smac", 24 [NPC_ETYPE] = "ether type", 25 [NPC_VLAN_ETYPE_CTAG] = "vlan ether type ctag", 26 [NPC_VLAN_ETYPE_STAG] = "vlan ether type stag", 27 [NPC_OUTER_VID] = "outer vlan id", 28 [NPC_TOS] = "tos", 29 [NPC_IPFRAG_IPV4] = "fragmented IPv4 header ", 30 [NPC_SIP_IPV4] = "ipv4 source ip", 31 [NPC_DIP_IPV4] = "ipv4 destination ip", 32 [NPC_IPFRAG_IPV6] = "fragmented IPv6 header ", 33 [NPC_SIP_IPV6] = "ipv6 source ip", 34 [NPC_DIP_IPV6] = "ipv6 destination ip", 35 [NPC_IPPROTO_TCP] = "ip proto tcp", 36 [NPC_IPPROTO_UDP] = "ip proto udp", 37 [NPC_IPPROTO_SCTP] = "ip proto sctp", 38 [NPC_IPPROTO_ICMP] = "ip proto icmp", 39 [NPC_IPPROTO_ICMP6] = "ip proto icmp6", 40 [NPC_IPPROTO_AH] = "ip proto AH", 41 [NPC_IPPROTO_ESP] = "ip proto ESP", 42 [NPC_SPORT_TCP] = "tcp source port", 43 [NPC_DPORT_TCP] = "tcp destination port", 44 [NPC_SPORT_UDP] = "udp source port", 45 [NPC_DPORT_UDP] = "udp destination port", 46 [NPC_SPORT_SCTP] = "sctp source port", 47 [NPC_DPORT_SCTP] = "sctp destination port", 48 [NPC_LXMB] = "Mcast/Bcast header ", 49 [NPC_UNKNOWN] = "unknown", 50 }; 51 52 bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf) 53 { 54 struct npc_mcam *mcam = &rvu->hw->mcam; 55 u64 mcam_features; 56 u64 unsupported; 57 58 mcam_features = is_npc_intf_tx(intf) ? mcam->tx_features : mcam->rx_features; 59 unsupported = (mcam_features ^ features) & ~mcam_features; 60 61 /* Return false if at least one of the input flows is not extracted */ 62 return !unsupported; 63 } 64 65 const char *npc_get_field_name(u8 hdr) 66 { 67 if (hdr >= ARRAY_SIZE(npc_flow_names)) 68 return npc_flow_names[NPC_UNKNOWN]; 69 70 return npc_flow_names[hdr]; 71 } 72 73 /* Compute keyword masks and figure out the number of keywords a field 74 * spans in the key. 75 */ 76 static void npc_set_kw_masks(struct npc_mcam *mcam, u8 type, 77 u8 nr_bits, int start_kwi, int offset, u8 intf) 78 { 79 struct npc_key_field *field = &mcam->rx_key_fields[type]; 80 u8 bits_in_kw; 81 int max_kwi; 82 83 if (mcam->banks_per_entry == 1) 84 max_kwi = 1; /* NPC_MCAM_KEY_X1 */ 85 else if (mcam->banks_per_entry == 2) 86 max_kwi = 3; /* NPC_MCAM_KEY_X2 */ 87 else 88 max_kwi = 6; /* NPC_MCAM_KEY_X4 */ 89 90 if (is_npc_intf_tx(intf)) 91 field = &mcam->tx_key_fields[type]; 92 93 if (offset + nr_bits <= 64) { 94 /* one KW only */ 95 if (start_kwi > max_kwi) 96 return; 97 field->kw_mask[start_kwi] |= GENMASK_ULL(nr_bits - 1, 0) 98 << offset; 99 field->nr_kws = 1; 100 } else if (offset + nr_bits > 64 && 101 offset + nr_bits <= 128) { 102 /* two KWs */ 103 if (start_kwi + 1 > max_kwi) 104 return; 105 /* first KW mask */ 106 bits_in_kw = 64 - offset; 107 field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0) 108 << offset; 109 /* second KW mask i.e. mask for rest of bits */ 110 bits_in_kw = nr_bits + offset - 64; 111 field->kw_mask[start_kwi + 1] |= GENMASK_ULL(bits_in_kw - 1, 0); 112 field->nr_kws = 2; 113 } else { 114 /* three KWs */ 115 if (start_kwi + 2 > max_kwi) 116 return; 117 /* first KW mask */ 118 bits_in_kw = 64 - offset; 119 field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0) 120 << offset; 121 /* second KW mask */ 122 field->kw_mask[start_kwi + 1] = ~0ULL; 123 /* third KW mask i.e. mask for rest of bits */ 124 bits_in_kw = nr_bits + offset - 128; 125 field->kw_mask[start_kwi + 2] |= GENMASK_ULL(bits_in_kw - 1, 0); 126 field->nr_kws = 3; 127 } 128 } 129 130 /* Helper function to figure out whether field exists in the key */ 131 static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf) 132 { 133 struct npc_mcam *mcam = &rvu->hw->mcam; 134 struct npc_key_field *input; 135 136 input = &mcam->rx_key_fields[type]; 137 if (is_npc_intf_tx(intf)) 138 input = &mcam->tx_key_fields[type]; 139 140 return input->nr_kws > 0; 141 } 142 143 static bool npc_is_same(struct npc_key_field *input, 144 struct npc_key_field *field) 145 { 146 return memcmp(&input->layer_mdata, &field->layer_mdata, 147 sizeof(struct npc_layer_mdata)) == 0; 148 } 149 150 static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type, 151 u64 cfg, u8 lid, u8 lt, u8 intf) 152 { 153 struct npc_key_field *input = &mcam->rx_key_fields[type]; 154 155 if (is_npc_intf_tx(intf)) 156 input = &mcam->tx_key_fields[type]; 157 158 input->layer_mdata.hdr = FIELD_GET(NPC_HDR_OFFSET, cfg); 159 input->layer_mdata.key = FIELD_GET(NPC_KEY_OFFSET, cfg); 160 input->layer_mdata.len = FIELD_GET(NPC_BYTESM, cfg) + 1; 161 input->layer_mdata.ltype = lt; 162 input->layer_mdata.lid = lid; 163 } 164 165 static bool npc_check_overlap_fields(struct npc_key_field *input1, 166 struct npc_key_field *input2) 167 { 168 int kwi; 169 170 /* Fields with same layer id and different ltypes are mutually 171 * exclusive hence they can be overlapped 172 */ 173 if (input1->layer_mdata.lid == input2->layer_mdata.lid && 174 input1->layer_mdata.ltype != input2->layer_mdata.ltype) 175 return false; 176 177 for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) { 178 if (input1->kw_mask[kwi] & input2->kw_mask[kwi]) 179 return true; 180 } 181 182 return false; 183 } 184 185 /* Helper function to check whether given field overlaps with any other fields 186 * in the key. Due to limitations on key size and the key extraction profile in 187 * use higher layers can overwrite lower layer's header fields. Hence overlap 188 * needs to be checked. 189 */ 190 static bool npc_check_overlap(struct rvu *rvu, int blkaddr, 191 enum key_fields type, u8 start_lid, u8 intf) 192 { 193 struct npc_mcam *mcam = &rvu->hw->mcam; 194 struct npc_key_field *dummy, *input; 195 int start_kwi, offset; 196 u8 nr_bits, lid, lt, ld; 197 u64 cfg; 198 199 dummy = &mcam->rx_key_fields[NPC_UNKNOWN]; 200 input = &mcam->rx_key_fields[type]; 201 202 if (is_npc_intf_tx(intf)) { 203 dummy = &mcam->tx_key_fields[NPC_UNKNOWN]; 204 input = &mcam->tx_key_fields[type]; 205 } 206 207 for (lid = start_lid; lid < NPC_MAX_LID; lid++) { 208 for (lt = 0; lt < NPC_MAX_LT; lt++) { 209 for (ld = 0; ld < NPC_MAX_LD; ld++) { 210 cfg = rvu_read64(rvu, blkaddr, 211 NPC_AF_INTFX_LIDX_LTX_LDX_CFG 212 (intf, lid, lt, ld)); 213 if (!FIELD_GET(NPC_LDATA_EN, cfg)) 214 continue; 215 memset(dummy, 0, sizeof(struct npc_key_field)); 216 npc_set_layer_mdata(mcam, NPC_UNKNOWN, cfg, 217 lid, lt, intf); 218 /* exclude input */ 219 if (npc_is_same(input, dummy)) 220 continue; 221 start_kwi = dummy->layer_mdata.key / 8; 222 offset = (dummy->layer_mdata.key * 8) % 64; 223 nr_bits = dummy->layer_mdata.len * 8; 224 /* form KW masks */ 225 npc_set_kw_masks(mcam, NPC_UNKNOWN, nr_bits, 226 start_kwi, offset, intf); 227 /* check any input field bits falls in any 228 * other field bits. 229 */ 230 if (npc_check_overlap_fields(dummy, input)) 231 return true; 232 } 233 } 234 } 235 236 return false; 237 } 238 239 static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type, 240 u8 intf) 241 { 242 if (!npc_is_field_present(rvu, type, intf) || 243 npc_check_overlap(rvu, blkaddr, type, 0, intf)) 244 return false; 245 return true; 246 } 247 248 static void npc_scan_exact_result(struct npc_mcam *mcam, u8 bit_number, 249 u8 key_nibble, u8 intf) 250 { 251 u8 offset = (key_nibble * 4) % 64; /* offset within key word */ 252 u8 kwi = (key_nibble * 4) / 64; /* which word in key */ 253 u8 nr_bits = 4; /* bits in a nibble */ 254 u8 type; 255 256 switch (bit_number) { 257 case 40 ... 43: 258 type = NPC_EXACT_RESULT; 259 break; 260 261 default: 262 return; 263 } 264 npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf); 265 } 266 267 static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number, 268 u8 key_nibble, u8 intf) 269 { 270 u8 offset = (key_nibble * 4) % 64; /* offset within key word */ 271 u8 kwi = (key_nibble * 4) / 64; /* which word in key */ 272 u8 nr_bits = 4; /* bits in a nibble */ 273 u8 type; 274 275 switch (bit_number) { 276 case 0 ... 2: 277 type = NPC_CHAN; 278 break; 279 case 3: 280 type = NPC_ERRLEV; 281 break; 282 case 4 ... 5: 283 type = NPC_ERRCODE; 284 break; 285 case 6: 286 type = NPC_LXMB; 287 break; 288 /* check for LTYPE only as of now */ 289 case 9: 290 type = NPC_LA; 291 break; 292 case 12: 293 type = NPC_LB; 294 break; 295 case 15: 296 type = NPC_LC; 297 break; 298 case 18: 299 type = NPC_LD; 300 break; 301 case 21: 302 type = NPC_LE; 303 break; 304 case 24: 305 type = NPC_LF; 306 break; 307 case 27: 308 type = NPC_LG; 309 break; 310 case 30: 311 type = NPC_LH; 312 break; 313 default: 314 return; 315 } 316 317 npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf); 318 } 319 320 static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf) 321 { 322 struct npc_mcam *mcam = &rvu->hw->mcam; 323 struct npc_key_field *key_fields; 324 /* Ether type can come from three layers 325 * (ethernet, single tagged, double tagged) 326 */ 327 struct npc_key_field *etype_ether; 328 struct npc_key_field *etype_tag1; 329 struct npc_key_field *etype_tag2; 330 /* Outer VLAN TCI can come from two layers 331 * (single tagged, double tagged) 332 */ 333 struct npc_key_field *vlan_tag1; 334 struct npc_key_field *vlan_tag2; 335 u64 *features; 336 u8 start_lid; 337 int i; 338 339 key_fields = mcam->rx_key_fields; 340 features = &mcam->rx_features; 341 342 if (is_npc_intf_tx(intf)) { 343 key_fields = mcam->tx_key_fields; 344 features = &mcam->tx_features; 345 } 346 347 /* Handle header fields which can come from multiple layers like 348 * etype, outer vlan tci. These fields should have same position in 349 * the key otherwise to install a mcam rule more than one entry is 350 * needed which complicates mcam space management. 351 */ 352 etype_ether = &key_fields[NPC_ETYPE_ETHER]; 353 etype_tag1 = &key_fields[NPC_ETYPE_TAG1]; 354 etype_tag2 = &key_fields[NPC_ETYPE_TAG2]; 355 vlan_tag1 = &key_fields[NPC_VLAN_TAG1]; 356 vlan_tag2 = &key_fields[NPC_VLAN_TAG2]; 357 358 /* if key profile programmed does not extract Ethertype at all */ 359 if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) { 360 dev_err(rvu->dev, "mkex: Ethertype is not extracted.\n"); 361 goto vlan_tci; 362 } 363 364 /* if key profile programmed extracts Ethertype from one layer */ 365 if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) 366 key_fields[NPC_ETYPE] = *etype_ether; 367 if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws) 368 key_fields[NPC_ETYPE] = *etype_tag1; 369 if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws) 370 key_fields[NPC_ETYPE] = *etype_tag2; 371 372 /* if key profile programmed extracts Ethertype from multiple layers */ 373 if (etype_ether->nr_kws && etype_tag1->nr_kws) { 374 for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { 375 if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i]) { 376 dev_err(rvu->dev, "mkex: Etype pos is different for untagged and tagged pkts.\n"); 377 goto vlan_tci; 378 } 379 } 380 key_fields[NPC_ETYPE] = *etype_tag1; 381 } 382 if (etype_ether->nr_kws && etype_tag2->nr_kws) { 383 for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { 384 if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i]) { 385 dev_err(rvu->dev, "mkex: Etype pos is different for untagged and double tagged pkts.\n"); 386 goto vlan_tci; 387 } 388 } 389 key_fields[NPC_ETYPE] = *etype_tag2; 390 } 391 if (etype_tag1->nr_kws && etype_tag2->nr_kws) { 392 for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { 393 if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i]) { 394 dev_err(rvu->dev, "mkex: Etype pos is different for tagged and double tagged pkts.\n"); 395 goto vlan_tci; 396 } 397 } 398 key_fields[NPC_ETYPE] = *etype_tag2; 399 } 400 401 /* check none of higher layers overwrite Ethertype */ 402 start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1; 403 if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf)) { 404 dev_err(rvu->dev, "mkex: Ethertype is overwritten by higher layers.\n"); 405 goto vlan_tci; 406 } 407 *features |= BIT_ULL(NPC_ETYPE); 408 vlan_tci: 409 /* if key profile does not extract outer vlan tci at all */ 410 if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws) { 411 dev_err(rvu->dev, "mkex: Outer vlan tci is not extracted.\n"); 412 goto done; 413 } 414 415 /* if key profile extracts outer vlan tci from one layer */ 416 if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws) 417 key_fields[NPC_OUTER_VID] = *vlan_tag1; 418 if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws) 419 key_fields[NPC_OUTER_VID] = *vlan_tag2; 420 421 /* if key profile extracts outer vlan tci from multiple layers */ 422 if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) { 423 for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { 424 if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i]) { 425 dev_err(rvu->dev, "mkex: Out vlan tci pos is different for tagged and double tagged pkts.\n"); 426 goto done; 427 } 428 } 429 key_fields[NPC_OUTER_VID] = *vlan_tag2; 430 } 431 /* check none of higher layers overwrite outer vlan tci */ 432 start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1; 433 if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf)) { 434 dev_err(rvu->dev, "mkex: Outer vlan tci is overwritten by higher layers.\n"); 435 goto done; 436 } 437 *features |= BIT_ULL(NPC_OUTER_VID); 438 done: 439 return; 440 } 441 442 static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid, 443 u8 lt, u64 cfg, u8 intf) 444 { 445 struct npc_mcam *mcam = &rvu->hw->mcam; 446 u8 hdr, key, nr_bytes, bit_offset; 447 u8 la_ltype, la_start; 448 /* starting KW index and starting bit position */ 449 int start_kwi, offset; 450 451 nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1; 452 hdr = FIELD_GET(NPC_HDR_OFFSET, cfg); 453 key = FIELD_GET(NPC_KEY_OFFSET, cfg); 454 455 /* For Tx, Layer A has NIX_INST_HDR_S(64 bytes) preceding 456 * ethernet header. 457 */ 458 if (is_npc_intf_tx(intf)) { 459 la_ltype = NPC_LT_LA_IH_NIX_ETHER; 460 la_start = 8; 461 } else { 462 la_ltype = NPC_LT_LA_ETHER; 463 la_start = 0; 464 } 465 466 #define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen) \ 467 do { \ 468 start_kwi = key / 8; \ 469 offset = (key * 8) % 64; \ 470 if (lid == (hlid) && lt == (hlt)) { \ 471 if ((hstart) >= hdr && \ 472 ((hstart) + (hlen)) <= (hdr + nr_bytes)) { \ 473 bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \ 474 npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \ 475 offset += bit_offset; \ 476 start_kwi += offset / 64; \ 477 offset %= 64; \ 478 npc_set_kw_masks(mcam, (name), (hlen) * 8, \ 479 start_kwi, offset, intf); \ 480 } \ 481 } \ 482 } while (0) 483 484 /* List LID, LTYPE, start offset from layer and length(in bytes) of 485 * packet header fields below. 486 * Example: Source IP is 4 bytes and starts at 12th byte of IP header 487 */ 488 NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1); 489 NPC_SCAN_HDR(NPC_IPFRAG_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 6, 1); 490 NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4); 491 NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4); 492 NPC_SCAN_HDR(NPC_IPFRAG_IPV6, NPC_LID_LC, NPC_LT_LC_IP6_EXT, 6, 1); 493 NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16); 494 NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16); 495 NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2); 496 NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2); 497 NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2); 498 NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2); 499 NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2); 500 NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2); 501 NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2); 502 NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2); 503 NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2); 504 NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2); 505 NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2); 506 NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6); 507 /* SMAC follows the DMAC(which is 6 bytes) */ 508 NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6); 509 /* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */ 510 NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2); 511 } 512 513 static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) 514 { 515 struct npc_mcam *mcam = &rvu->hw->mcam; 516 u64 *features = &mcam->rx_features; 517 u64 tcp_udp_sctp; 518 int hdr; 519 520 if (is_npc_intf_tx(intf)) 521 features = &mcam->tx_features; 522 523 for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) { 524 if (npc_check_field(rvu, blkaddr, hdr, intf)) 525 *features |= BIT_ULL(hdr); 526 } 527 528 tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) | 529 BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) | 530 BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP); 531 532 /* for tcp/udp/sctp corresponding layer type should be in the key */ 533 if (*features & tcp_udp_sctp) { 534 if (!npc_check_field(rvu, blkaddr, NPC_LD, intf)) 535 *features &= ~tcp_udp_sctp; 536 else 537 *features |= BIT_ULL(NPC_IPPROTO_TCP) | 538 BIT_ULL(NPC_IPPROTO_UDP) | 539 BIT_ULL(NPC_IPPROTO_SCTP); 540 } 541 542 /* for AH/ICMP/ICMPv6/, check if corresponding layer type is present in the key */ 543 if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) { 544 *features |= BIT_ULL(NPC_IPPROTO_AH); 545 *features |= BIT_ULL(NPC_IPPROTO_ICMP); 546 *features |= BIT_ULL(NPC_IPPROTO_ICMP6); 547 } 548 549 /* for ESP, check if corresponding layer type is present in the key */ 550 if (npc_check_field(rvu, blkaddr, NPC_LE, intf)) 551 *features |= BIT_ULL(NPC_IPPROTO_ESP); 552 553 /* for vlan corresponding layer type should be in the key */ 554 if (*features & BIT_ULL(NPC_OUTER_VID)) 555 if (!npc_check_field(rvu, blkaddr, NPC_LB, intf)) 556 *features &= ~BIT_ULL(NPC_OUTER_VID); 557 558 /* for vlan ethertypes corresponding layer type should be in the key */ 559 if (npc_check_field(rvu, blkaddr, NPC_LB, intf)) 560 *features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) | 561 BIT_ULL(NPC_VLAN_ETYPE_STAG); 562 563 /* for L2M/L2B/L3M/L3B, check if the type is present in the key */ 564 if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf)) 565 *features |= BIT_ULL(NPC_LXMB); 566 } 567 568 /* Scan key extraction profile and record how fields of our interest 569 * fill the key structure. Also verify Channel and DMAC exists in 570 * key and not overwritten by other header fields. 571 */ 572 static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf) 573 { 574 struct npc_mcam *mcam = &rvu->hw->mcam; 575 u8 lid, lt, ld, bitnr; 576 u64 cfg, masked_cfg; 577 u8 key_nibble = 0; 578 579 /* Scan and note how parse result is going to be in key. 580 * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from 581 * parse result in the key. The enabled nibbles from parse result 582 * will be concatenated in key. 583 */ 584 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf)); 585 masked_cfg = cfg & NPC_PARSE_NIBBLE; 586 for_each_set_bit(bitnr, (unsigned long *)&masked_cfg, 31) { 587 npc_scan_parse_result(mcam, bitnr, key_nibble, intf); 588 key_nibble++; 589 } 590 591 /* Ignore exact match bits for mcam entries except the first rule 592 * which is drop on hit. This first rule is configured explitcitly by 593 * exact match code. 594 */ 595 masked_cfg = cfg & NPC_EXACT_NIBBLE; 596 bitnr = NPC_EXACT_NIBBLE_START; 597 for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg, 598 NPC_EXACT_NIBBLE_START) { 599 npc_scan_exact_result(mcam, bitnr, key_nibble, intf); 600 key_nibble++; 601 } 602 603 /* Scan and note how layer data is going to be in key */ 604 for (lid = 0; lid < NPC_MAX_LID; lid++) { 605 for (lt = 0; lt < NPC_MAX_LT; lt++) { 606 for (ld = 0; ld < NPC_MAX_LD; ld++) { 607 cfg = rvu_read64(rvu, blkaddr, 608 NPC_AF_INTFX_LIDX_LTX_LDX_CFG 609 (intf, lid, lt, ld)); 610 if (!FIELD_GET(NPC_LDATA_EN, cfg)) 611 continue; 612 npc_scan_ldata(rvu, blkaddr, lid, lt, cfg, 613 intf); 614 } 615 } 616 } 617 618 return 0; 619 } 620 621 static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr) 622 { 623 int err; 624 625 err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX); 626 if (err) 627 return err; 628 629 err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX); 630 if (err) 631 return err; 632 633 /* Channel is mandatory */ 634 if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) { 635 dev_err(rvu->dev, "Channel not present in Key\n"); 636 return -EINVAL; 637 } 638 /* check that none of the fields overwrite channel */ 639 if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) { 640 dev_err(rvu->dev, "Channel cannot be overwritten\n"); 641 return -EINVAL; 642 } 643 644 npc_set_features(rvu, blkaddr, NIX_INTF_TX); 645 npc_set_features(rvu, blkaddr, NIX_INTF_RX); 646 npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_TX); 647 npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_RX); 648 649 return 0; 650 } 651 652 int npc_flow_steering_init(struct rvu *rvu, int blkaddr) 653 { 654 struct npc_mcam *mcam = &rvu->hw->mcam; 655 656 INIT_LIST_HEAD(&mcam->mcam_rules); 657 658 return npc_scan_verify_kex(rvu, blkaddr); 659 } 660 661 static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf) 662 { 663 struct npc_mcam *mcam = &rvu->hw->mcam; 664 u64 *mcam_features = &mcam->rx_features; 665 u64 unsupported; 666 u8 bit; 667 668 if (is_npc_intf_tx(intf)) 669 mcam_features = &mcam->tx_features; 670 671 unsupported = (*mcam_features ^ features) & ~(*mcam_features); 672 if (unsupported) { 673 dev_warn(rvu->dev, "Unsupported flow(s):\n"); 674 for_each_set_bit(bit, (unsigned long *)&unsupported, 64) 675 dev_warn(rvu->dev, "%s ", npc_get_field_name(bit)); 676 return -EOPNOTSUPP; 677 } 678 679 return 0; 680 } 681 682 /* npc_update_entry - Based on the masks generated during 683 * the key scanning, updates the given entry with value and 684 * masks for the field of interest. Maximum 16 bytes of a packet 685 * header can be extracted by HW hence lo and hi are sufficient. 686 * When field bytes are less than or equal to 8 then hi should be 687 * 0 for value and mask. 688 * 689 * If exact match of value is required then mask should be all 1's. 690 * If any bits in mask are 0 then corresponding bits in value are 691 * dont care. 692 */ 693 void npc_update_entry(struct rvu *rvu, enum key_fields type, 694 struct mcam_entry *entry, u64 val_lo, 695 u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf) 696 { 697 struct npc_mcam *mcam = &rvu->hw->mcam; 698 struct mcam_entry dummy = { {0} }; 699 struct npc_key_field *field; 700 u64 kw1, kw2, kw3; 701 u8 shift; 702 int i; 703 704 field = &mcam->rx_key_fields[type]; 705 if (is_npc_intf_tx(intf)) 706 field = &mcam->tx_key_fields[type]; 707 708 if (!field->nr_kws) 709 return; 710 711 for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { 712 if (!field->kw_mask[i]) 713 continue; 714 /* place key value in kw[x] */ 715 shift = __ffs64(field->kw_mask[i]); 716 /* update entry value */ 717 kw1 = (val_lo << shift) & field->kw_mask[i]; 718 dummy.kw[i] = kw1; 719 /* update entry mask */ 720 kw1 = (mask_lo << shift) & field->kw_mask[i]; 721 dummy.kw_mask[i] = kw1; 722 723 if (field->nr_kws == 1) 724 break; 725 /* place remaining bits of key value in kw[x + 1] */ 726 if (field->nr_kws == 2) { 727 /* update entry value */ 728 kw2 = shift ? val_lo >> (64 - shift) : 0; 729 kw2 |= (val_hi << shift); 730 kw2 &= field->kw_mask[i + 1]; 731 dummy.kw[i + 1] = kw2; 732 /* update entry mask */ 733 kw2 = shift ? mask_lo >> (64 - shift) : 0; 734 kw2 |= (mask_hi << shift); 735 kw2 &= field->kw_mask[i + 1]; 736 dummy.kw_mask[i + 1] = kw2; 737 break; 738 } 739 /* place remaining bits of key value in kw[x + 1], kw[x + 2] */ 740 if (field->nr_kws == 3) { 741 /* update entry value */ 742 kw2 = shift ? val_lo >> (64 - shift) : 0; 743 kw2 |= (val_hi << shift); 744 kw2 &= field->kw_mask[i + 1]; 745 kw3 = shift ? val_hi >> (64 - shift) : 0; 746 kw3 &= field->kw_mask[i + 2]; 747 dummy.kw[i + 1] = kw2; 748 dummy.kw[i + 2] = kw3; 749 /* update entry mask */ 750 kw2 = shift ? mask_lo >> (64 - shift) : 0; 751 kw2 |= (mask_hi << shift); 752 kw2 &= field->kw_mask[i + 1]; 753 kw3 = shift ? mask_hi >> (64 - shift) : 0; 754 kw3 &= field->kw_mask[i + 2]; 755 dummy.kw_mask[i + 1] = kw2; 756 dummy.kw_mask[i + 2] = kw3; 757 break; 758 } 759 } 760 /* dummy is ready with values and masks for given key 761 * field now clear and update input entry with those 762 */ 763 for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { 764 if (!field->kw_mask[i]) 765 continue; 766 entry->kw[i] &= ~field->kw_mask[i]; 767 entry->kw_mask[i] &= ~field->kw_mask[i]; 768 769 entry->kw[i] |= dummy.kw[i]; 770 entry->kw_mask[i] |= dummy.kw_mask[i]; 771 } 772 } 773 774 static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry, 775 u64 features, struct flow_msg *pkt, 776 struct flow_msg *mask, 777 struct rvu_npc_mcam_rule *output, u8 intf) 778 { 779 u32 src_ip[IPV6_WORDS], src_ip_mask[IPV6_WORDS]; 780 u32 dst_ip[IPV6_WORDS], dst_ip_mask[IPV6_WORDS]; 781 struct flow_msg *opkt = &output->packet; 782 struct flow_msg *omask = &output->mask; 783 u64 mask_lo, mask_hi; 784 u64 val_lo, val_hi; 785 786 /* For an ipv6 address fe80::2c68:63ff:fe5e:2d0a the packet 787 * values to be programmed in MCAM should as below: 788 * val_high: 0xfe80000000000000 789 * val_low: 0x2c6863fffe5e2d0a 790 */ 791 if (features & BIT_ULL(NPC_SIP_IPV6)) { 792 be32_to_cpu_array(src_ip_mask, mask->ip6src, IPV6_WORDS); 793 be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS); 794 795 mask_hi = (u64)src_ip_mask[0] << 32 | src_ip_mask[1]; 796 mask_lo = (u64)src_ip_mask[2] << 32 | src_ip_mask[3]; 797 val_hi = (u64)src_ip[0] << 32 | src_ip[1]; 798 val_lo = (u64)src_ip[2] << 32 | src_ip[3]; 799 800 npc_update_entry(rvu, NPC_SIP_IPV6, entry, val_lo, val_hi, 801 mask_lo, mask_hi, intf); 802 memcpy(opkt->ip6src, pkt->ip6src, sizeof(opkt->ip6src)); 803 memcpy(omask->ip6src, mask->ip6src, sizeof(omask->ip6src)); 804 } 805 if (features & BIT_ULL(NPC_DIP_IPV6)) { 806 be32_to_cpu_array(dst_ip_mask, mask->ip6dst, IPV6_WORDS); 807 be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS); 808 809 mask_hi = (u64)dst_ip_mask[0] << 32 | dst_ip_mask[1]; 810 mask_lo = (u64)dst_ip_mask[2] << 32 | dst_ip_mask[3]; 811 val_hi = (u64)dst_ip[0] << 32 | dst_ip[1]; 812 val_lo = (u64)dst_ip[2] << 32 | dst_ip[3]; 813 814 npc_update_entry(rvu, NPC_DIP_IPV6, entry, val_lo, val_hi, 815 mask_lo, mask_hi, intf); 816 memcpy(opkt->ip6dst, pkt->ip6dst, sizeof(opkt->ip6dst)); 817 memcpy(omask->ip6dst, mask->ip6dst, sizeof(omask->ip6dst)); 818 } 819 } 820 821 static void npc_update_vlan_features(struct rvu *rvu, struct mcam_entry *entry, 822 u64 features, u8 intf) 823 { 824 bool ctag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_CTAG)); 825 bool stag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_STAG)); 826 bool vid = !!(features & BIT_ULL(NPC_OUTER_VID)); 827 828 /* If only VLAN id is given then always match outer VLAN id */ 829 if (vid && !ctag && !stag) { 830 npc_update_entry(rvu, NPC_LB, entry, 831 NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0, 832 NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf); 833 return; 834 } 835 if (ctag) 836 npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_CTAG, 0, 837 ~0ULL, 0, intf); 838 if (stag) 839 npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_STAG_QINQ, 0, 840 ~0ULL, 0, intf); 841 } 842 843 static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry, 844 u64 features, struct flow_msg *pkt, 845 struct flow_msg *mask, 846 struct rvu_npc_mcam_rule *output, u8 intf, 847 int blkaddr) 848 { 849 u64 dmac_mask = ether_addr_to_u64(mask->dmac); 850 u64 smac_mask = ether_addr_to_u64(mask->smac); 851 u64 dmac_val = ether_addr_to_u64(pkt->dmac); 852 u64 smac_val = ether_addr_to_u64(pkt->smac); 853 struct flow_msg *opkt = &output->packet; 854 struct flow_msg *omask = &output->mask; 855 856 if (!features) 857 return; 858 859 /* For tcp/udp/sctp LTYPE should be present in entry */ 860 if (features & BIT_ULL(NPC_IPPROTO_TCP)) 861 npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP, 862 0, ~0ULL, 0, intf); 863 if (features & BIT_ULL(NPC_IPPROTO_UDP)) 864 npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP, 865 0, ~0ULL, 0, intf); 866 if (features & BIT_ULL(NPC_IPPROTO_SCTP)) 867 npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP, 868 0, ~0ULL, 0, intf); 869 if (features & BIT_ULL(NPC_IPPROTO_ICMP)) 870 npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP, 871 0, ~0ULL, 0, intf); 872 if (features & BIT_ULL(NPC_IPPROTO_ICMP6)) 873 npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6, 874 0, ~0ULL, 0, intf); 875 876 /* For AH, LTYPE should be present in entry */ 877 if (features & BIT_ULL(NPC_IPPROTO_AH)) 878 npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH, 879 0, ~0ULL, 0, intf); 880 /* For ESP, LTYPE should be present in entry */ 881 if (features & BIT_ULL(NPC_IPPROTO_ESP)) 882 npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP, 883 0, ~0ULL, 0, intf); 884 885 if (features & BIT_ULL(NPC_LXMB)) { 886 output->lxmb = is_broadcast_ether_addr(pkt->dmac) ? 2 : 1; 887 npc_update_entry(rvu, NPC_LXMB, entry, output->lxmb, 0, 888 output->lxmb, 0, intf); 889 } 890 #define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \ 891 do { \ 892 if (features & BIT_ULL((field))) { \ 893 npc_update_entry(rvu, (field), entry, (val_lo), (val_hi), \ 894 (mask_lo), (mask_hi), intf); \ 895 memcpy(&opkt->member, &pkt->member, sizeof(pkt->member)); \ 896 memcpy(&omask->member, &mask->member, sizeof(mask->member)); \ 897 } \ 898 } while (0) 899 900 NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0); 901 902 NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0); 903 NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0, 904 ntohs(mask->etype), 0); 905 NPC_WRITE_FLOW(NPC_TOS, tos, pkt->tos, 0, mask->tos, 0); 906 NPC_WRITE_FLOW(NPC_IPFRAG_IPV4, ip_flag, pkt->ip_flag, 0, 907 mask->ip_flag, 0); 908 NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0, 909 ntohl(mask->ip4src), 0); 910 NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0, 911 ntohl(mask->ip4dst), 0); 912 NPC_WRITE_FLOW(NPC_SPORT_TCP, sport, ntohs(pkt->sport), 0, 913 ntohs(mask->sport), 0); 914 NPC_WRITE_FLOW(NPC_SPORT_UDP, sport, ntohs(pkt->sport), 0, 915 ntohs(mask->sport), 0); 916 NPC_WRITE_FLOW(NPC_DPORT_TCP, dport, ntohs(pkt->dport), 0, 917 ntohs(mask->dport), 0); 918 NPC_WRITE_FLOW(NPC_DPORT_UDP, dport, ntohs(pkt->dport), 0, 919 ntohs(mask->dport), 0); 920 NPC_WRITE_FLOW(NPC_SPORT_SCTP, sport, ntohs(pkt->sport), 0, 921 ntohs(mask->sport), 0); 922 NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0, 923 ntohs(mask->dport), 0); 924 925 NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0, 926 ntohs(mask->vlan_tci), 0); 927 928 NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0, 929 mask->next_header, 0); 930 npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf); 931 npc_update_vlan_features(rvu, entry, features, intf); 932 933 npc_update_field_hash(rvu, intf, entry, blkaddr, features, 934 pkt, mask, opkt, omask); 935 } 936 937 static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, u16 entry) 938 { 939 struct rvu_npc_mcam_rule *iter; 940 941 mutex_lock(&mcam->lock); 942 list_for_each_entry(iter, &mcam->mcam_rules, list) { 943 if (iter->entry == entry) { 944 mutex_unlock(&mcam->lock); 945 return iter; 946 } 947 } 948 mutex_unlock(&mcam->lock); 949 950 return NULL; 951 } 952 953 static void rvu_mcam_add_rule(struct npc_mcam *mcam, 954 struct rvu_npc_mcam_rule *rule) 955 { 956 struct list_head *head = &mcam->mcam_rules; 957 struct rvu_npc_mcam_rule *iter; 958 959 mutex_lock(&mcam->lock); 960 list_for_each_entry(iter, &mcam->mcam_rules, list) { 961 if (iter->entry > rule->entry) 962 break; 963 head = &iter->list; 964 } 965 966 list_add(&rule->list, head); 967 mutex_unlock(&mcam->lock); 968 } 969 970 static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc, 971 struct rvu_npc_mcam_rule *rule) 972 { 973 struct npc_mcam_oper_counter_req free_req = { 0 }; 974 struct msg_rsp free_rsp; 975 976 if (!rule->has_cntr) 977 return; 978 979 free_req.hdr.pcifunc = pcifunc; 980 free_req.cntr = rule->cntr; 981 982 rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp); 983 rule->has_cntr = false; 984 } 985 986 static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc, 987 struct rvu_npc_mcam_rule *rule, 988 struct npc_install_flow_rsp *rsp) 989 { 990 struct npc_mcam_alloc_counter_req cntr_req = { 0 }; 991 struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 }; 992 int err; 993 994 cntr_req.hdr.pcifunc = pcifunc; 995 cntr_req.contig = true; 996 cntr_req.count = 1; 997 998 /* we try to allocate a counter to track the stats of this 999 * rule. If counter could not be allocated then proceed 1000 * without counter because counters are limited than entries. 1001 */ 1002 err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, 1003 &cntr_rsp); 1004 if (!err && cntr_rsp.count) { 1005 rule->cntr = cntr_rsp.cntr; 1006 rule->has_cntr = true; 1007 rsp->counter = rule->cntr; 1008 } else { 1009 rsp->counter = err; 1010 } 1011 } 1012 1013 static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, 1014 struct mcam_entry *entry, 1015 struct npc_install_flow_req *req, 1016 u16 target, bool pf_set_vfs_mac) 1017 { 1018 struct rvu_switch *rswitch = &rvu->rswitch; 1019 struct nix_rx_action action; 1020 1021 if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac) 1022 req->chan_mask = 0x0; /* Do not care channel */ 1023 1024 npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask, 1025 0, NIX_INTF_RX); 1026 1027 *(u64 *)&action = 0x00; 1028 action.pf_func = target; 1029 action.op = req->op; 1030 action.index = req->index; 1031 action.match_id = req->match_id; 1032 action.flow_key_alg = req->flow_key_alg; 1033 1034 if (req->op == NIX_RX_ACTION_DEFAULT) { 1035 if (pfvf->def_ucast_rule) { 1036 action = pfvf->def_ucast_rule->rx_action; 1037 } else { 1038 /* For profiles which do not extract DMAC, the default 1039 * unicast entry is unused. Hence modify action for the 1040 * requests which use same action as default unicast 1041 * entry 1042 */ 1043 *(u64 *)&action = 0; 1044 action.pf_func = target; 1045 action.op = NIX_RX_ACTIONOP_UCAST; 1046 } 1047 } 1048 1049 entry->action = *(u64 *)&action; 1050 1051 /* VTAG0 starts at 0th byte of LID_B. 1052 * VTAG1 starts at 4th byte of LID_B. 1053 */ 1054 entry->vtag_action = FIELD_PREP(RX_VTAG0_VALID_BIT, req->vtag0_valid) | 1055 FIELD_PREP(RX_VTAG0_TYPE_MASK, req->vtag0_type) | 1056 FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) | 1057 FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0) | 1058 FIELD_PREP(RX_VTAG1_VALID_BIT, req->vtag1_valid) | 1059 FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) | 1060 FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) | 1061 FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4); 1062 } 1063 1064 static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, 1065 struct mcam_entry *entry, 1066 struct npc_install_flow_req *req, u16 target) 1067 { 1068 struct nix_tx_action action; 1069 u64 mask = ~0ULL; 1070 1071 /* If AF is installing then do not care about 1072 * PF_FUNC in Send Descriptor 1073 */ 1074 if (is_pffunc_af(req->hdr.pcifunc)) 1075 mask = 0; 1076 1077 npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target), 1078 0, mask, 0, NIX_INTF_TX); 1079 1080 *(u64 *)&action = 0x00; 1081 action.op = req->op; 1082 action.index = req->index; 1083 action.match_id = req->match_id; 1084 1085 entry->action = *(u64 *)&action; 1086 1087 /* VTAG0 starts at 0th byte of LID_B. 1088 * VTAG1 starts at 4th byte of LID_B. 1089 */ 1090 entry->vtag_action = FIELD_PREP(TX_VTAG0_DEF_MASK, req->vtag0_def) | 1091 FIELD_PREP(TX_VTAG0_OP_MASK, req->vtag0_op) | 1092 FIELD_PREP(TX_VTAG0_LID_MASK, NPC_LID_LA) | 1093 FIELD_PREP(TX_VTAG0_RELPTR_MASK, 20) | 1094 FIELD_PREP(TX_VTAG1_DEF_MASK, req->vtag1_def) | 1095 FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) | 1096 FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) | 1097 FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24); 1098 } 1099 1100 static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, 1101 int nixlf, struct rvu_pfvf *pfvf, 1102 struct npc_install_flow_req *req, 1103 struct npc_install_flow_rsp *rsp, bool enable, 1104 bool pf_set_vfs_mac) 1105 { 1106 struct rvu_npc_mcam_rule *def_ucast_rule = pfvf->def_ucast_rule; 1107 u64 features, installed_features, missing_features = 0; 1108 struct npc_mcam_write_entry_req write_req = { 0 }; 1109 struct npc_mcam *mcam = &rvu->hw->mcam; 1110 struct rvu_npc_mcam_rule dummy = { 0 }; 1111 struct rvu_npc_mcam_rule *rule; 1112 u16 owner = req->hdr.pcifunc; 1113 struct msg_rsp write_rsp; 1114 struct mcam_entry *entry; 1115 bool new = false; 1116 u16 entry_index; 1117 int err; 1118 1119 installed_features = req->features; 1120 features = req->features; 1121 entry = &write_req.entry_data; 1122 entry_index = req->entry; 1123 1124 npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy, 1125 req->intf, blkaddr); 1126 1127 if (is_npc_intf_rx(req->intf)) 1128 npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac); 1129 else 1130 npc_update_tx_entry(rvu, pfvf, entry, req, target); 1131 1132 /* Default unicast rules do not exist for TX */ 1133 if (is_npc_intf_tx(req->intf)) 1134 goto find_rule; 1135 1136 if (req->default_rule) { 1137 entry_index = npc_get_nixlf_mcam_index(mcam, target, nixlf, 1138 NIXLF_UCAST_ENTRY); 1139 enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, entry_index); 1140 } 1141 1142 /* update mcam entry with default unicast rule attributes */ 1143 if (def_ucast_rule && (req->default_rule && req->append)) { 1144 missing_features = (def_ucast_rule->features ^ features) & 1145 def_ucast_rule->features; 1146 if (missing_features) 1147 npc_update_flow(rvu, entry, missing_features, 1148 &def_ucast_rule->packet, 1149 &def_ucast_rule->mask, 1150 &dummy, req->intf, 1151 blkaddr); 1152 installed_features = req->features | missing_features; 1153 } 1154 1155 find_rule: 1156 rule = rvu_mcam_find_rule(mcam, entry_index); 1157 if (!rule) { 1158 rule = kzalloc(sizeof(*rule), GFP_KERNEL); 1159 if (!rule) 1160 return -ENOMEM; 1161 new = true; 1162 } 1163 1164 /* allocate new counter if rule has no counter */ 1165 if (!req->default_rule && req->set_cntr && !rule->has_cntr) 1166 rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp); 1167 1168 /* if user wants to delete an existing counter for a rule then 1169 * free the counter 1170 */ 1171 if (!req->set_cntr && rule->has_cntr) 1172 rvu_mcam_remove_counter_from_rule(rvu, owner, rule); 1173 1174 write_req.hdr.pcifunc = owner; 1175 1176 /* AF owns the default rules so change the owner just to relax 1177 * the checks in rvu_mbox_handler_npc_mcam_write_entry 1178 */ 1179 if (req->default_rule) 1180 write_req.hdr.pcifunc = 0; 1181 1182 write_req.entry = entry_index; 1183 write_req.intf = req->intf; 1184 write_req.enable_entry = (u8)enable; 1185 /* if counter is available then clear and use it */ 1186 if (req->set_cntr && rule->has_cntr) { 1187 rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00); 1188 write_req.set_cntr = 1; 1189 write_req.cntr = rule->cntr; 1190 } 1191 1192 /* update rule */ 1193 memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet)); 1194 memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask)); 1195 rule->entry = entry_index; 1196 memcpy(&rule->rx_action, &entry->action, sizeof(struct nix_rx_action)); 1197 if (is_npc_intf_tx(req->intf)) 1198 memcpy(&rule->tx_action, &entry->action, 1199 sizeof(struct nix_tx_action)); 1200 rule->vtag_action = entry->vtag_action; 1201 rule->features = installed_features; 1202 rule->default_rule = req->default_rule; 1203 rule->owner = owner; 1204 rule->enable = enable; 1205 rule->chan_mask = write_req.entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK; 1206 rule->chan = write_req.entry_data.kw[0] & NPC_KEX_CHAN_MASK; 1207 rule->chan &= rule->chan_mask; 1208 rule->lxmb = dummy.lxmb; 1209 if (is_npc_intf_tx(req->intf)) 1210 rule->intf = pfvf->nix_tx_intf; 1211 else 1212 rule->intf = pfvf->nix_rx_intf; 1213 1214 if (new) 1215 rvu_mcam_add_rule(mcam, rule); 1216 if (req->default_rule) 1217 pfvf->def_ucast_rule = rule; 1218 1219 /* write to mcam entry registers */ 1220 err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, 1221 &write_rsp); 1222 if (err) { 1223 rvu_mcam_remove_counter_from_rule(rvu, owner, rule); 1224 if (new) { 1225 list_del(&rule->list); 1226 kfree(rule); 1227 } 1228 return err; 1229 } 1230 1231 /* VF's MAC address is being changed via PF */ 1232 if (pf_set_vfs_mac) { 1233 ether_addr_copy(pfvf->default_mac, req->packet.dmac); 1234 ether_addr_copy(pfvf->mac_addr, req->packet.dmac); 1235 set_bit(PF_SET_VF_MAC, &pfvf->flags); 1236 } 1237 1238 if (test_bit(PF_SET_VF_CFG, &pfvf->flags) && 1239 req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7) 1240 rule->vfvlan_cfg = true; 1241 1242 if (is_npc_intf_rx(req->intf) && req->match_id && 1243 (req->op == NIX_RX_ACTIONOP_UCAST || req->op == NIX_RX_ACTIONOP_RSS)) 1244 return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc, 1245 req->index, req->match_id); 1246 1247 return 0; 1248 } 1249 1250 int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, 1251 struct npc_install_flow_req *req, 1252 struct npc_install_flow_rsp *rsp) 1253 { 1254 bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK); 1255 struct rvu_switch *rswitch = &rvu->rswitch; 1256 int blkaddr, nixlf, err; 1257 struct rvu_pfvf *pfvf; 1258 bool pf_set_vfs_mac = false; 1259 bool enable = true; 1260 u16 target; 1261 1262 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1263 if (blkaddr < 0) { 1264 dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); 1265 return NPC_MCAM_INVALID_REQ; 1266 } 1267 1268 if (!is_npc_interface_valid(rvu, req->intf)) 1269 return NPC_FLOW_INTF_INVALID; 1270 1271 /* If DMAC is not extracted in MKEX, rules installed by AF 1272 * can rely on L2MB bit set by hardware protocol checker for 1273 * broadcast and multicast addresses. 1274 */ 1275 if (npc_check_field(rvu, blkaddr, NPC_DMAC, req->intf)) 1276 goto process_flow; 1277 1278 if (is_pffunc_af(req->hdr.pcifunc) && 1279 req->features & BIT_ULL(NPC_DMAC)) { 1280 if (is_unicast_ether_addr(req->packet.dmac)) { 1281 dev_warn(rvu->dev, 1282 "%s: mkex profile does not support ucast flow\n", 1283 __func__); 1284 return NPC_FLOW_NOT_SUPPORTED; 1285 } 1286 1287 if (!npc_is_field_present(rvu, NPC_LXMB, req->intf)) { 1288 dev_warn(rvu->dev, 1289 "%s: mkex profile does not support bcast/mcast flow", 1290 __func__); 1291 return NPC_FLOW_NOT_SUPPORTED; 1292 } 1293 1294 /* Modify feature to use LXMB instead of DMAC */ 1295 req->features &= ~BIT_ULL(NPC_DMAC); 1296 req->features |= BIT_ULL(NPC_LXMB); 1297 } 1298 1299 process_flow: 1300 if (from_vf && req->default_rule) 1301 return NPC_FLOW_VF_PERM_DENIED; 1302 1303 /* Each PF/VF info is maintained in struct rvu_pfvf. 1304 * rvu_pfvf for the target PF/VF needs to be retrieved 1305 * hence modify pcifunc accordingly. 1306 */ 1307 1308 /* AF installing for a PF/VF */ 1309 if (!req->hdr.pcifunc) 1310 target = req->vf; 1311 /* PF installing for its VF */ 1312 else if (!from_vf && req->vf) { 1313 target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf; 1314 pf_set_vfs_mac = req->default_rule && 1315 (req->features & BIT_ULL(NPC_DMAC)); 1316 } 1317 /* msg received from PF/VF */ 1318 else 1319 target = req->hdr.pcifunc; 1320 1321 /* ignore chan_mask in case pf func is not AF, revisit later */ 1322 if (!is_pffunc_af(req->hdr.pcifunc)) 1323 req->chan_mask = 0xFFF; 1324 1325 err = npc_check_unsupported_flows(rvu, req->features, req->intf); 1326 if (err) 1327 return NPC_FLOW_NOT_SUPPORTED; 1328 1329 pfvf = rvu_get_pfvf(rvu, target); 1330 1331 /* PF installing for its VF */ 1332 if (req->hdr.pcifunc && !from_vf && req->vf) 1333 set_bit(PF_SET_VF_CFG, &pfvf->flags); 1334 1335 /* update req destination mac addr */ 1336 if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) && 1337 is_zero_ether_addr(req->packet.dmac)) { 1338 ether_addr_copy(req->packet.dmac, pfvf->mac_addr); 1339 eth_broadcast_addr((u8 *)&req->mask.dmac); 1340 } 1341 1342 /* Proceed if NIXLF is attached or not for TX rules */ 1343 err = nix_get_nixlf(rvu, target, &nixlf, NULL); 1344 if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac) 1345 return NPC_FLOW_NO_NIXLF; 1346 1347 /* don't enable rule when nixlf not attached or initialized */ 1348 if (!(is_nixlf_attached(rvu, target) && 1349 test_bit(NIXLF_INITIALIZED, &pfvf->flags))) 1350 enable = false; 1351 1352 /* Packets reaching NPC in Tx path implies that a 1353 * NIXLF is properly setup and transmitting. 1354 * Hence rules can be enabled for Tx. 1355 */ 1356 if (is_npc_intf_tx(req->intf)) 1357 enable = true; 1358 1359 /* Do not allow requests from uninitialized VFs */ 1360 if (from_vf && !enable) 1361 return NPC_FLOW_VF_NOT_INIT; 1362 1363 /* PF sets VF mac & VF NIXLF is not attached, update the mac addr */ 1364 if (pf_set_vfs_mac && !enable) { 1365 ether_addr_copy(pfvf->default_mac, req->packet.dmac); 1366 ether_addr_copy(pfvf->mac_addr, req->packet.dmac); 1367 set_bit(PF_SET_VF_MAC, &pfvf->flags); 1368 return 0; 1369 } 1370 1371 mutex_lock(&rswitch->switch_lock); 1372 err = npc_install_flow(rvu, blkaddr, target, nixlf, pfvf, 1373 req, rsp, enable, pf_set_vfs_mac); 1374 mutex_unlock(&rswitch->switch_lock); 1375 1376 return err; 1377 } 1378 1379 static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule, 1380 u16 pcifunc) 1381 { 1382 struct npc_mcam_ena_dis_entry_req dis_req = { 0 }; 1383 struct msg_rsp dis_rsp; 1384 1385 if (rule->default_rule) 1386 return 0; 1387 1388 if (rule->has_cntr) 1389 rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule); 1390 1391 dis_req.hdr.pcifunc = pcifunc; 1392 dis_req.entry = rule->entry; 1393 1394 list_del(&rule->list); 1395 kfree(rule); 1396 1397 return rvu_mbox_handler_npc_mcam_dis_entry(rvu, &dis_req, &dis_rsp); 1398 } 1399 1400 int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu, 1401 struct npc_delete_flow_req *req, 1402 struct msg_rsp *rsp) 1403 { 1404 struct npc_mcam *mcam = &rvu->hw->mcam; 1405 struct rvu_npc_mcam_rule *iter, *tmp; 1406 u16 pcifunc = req->hdr.pcifunc; 1407 struct list_head del_list; 1408 1409 INIT_LIST_HEAD(&del_list); 1410 1411 mutex_lock(&mcam->lock); 1412 list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) { 1413 if (iter->owner == pcifunc) { 1414 /* All rules */ 1415 if (req->all) { 1416 list_move_tail(&iter->list, &del_list); 1417 /* Range of rules */ 1418 } else if (req->end && iter->entry >= req->start && 1419 iter->entry <= req->end) { 1420 list_move_tail(&iter->list, &del_list); 1421 /* single rule */ 1422 } else if (req->entry == iter->entry) { 1423 list_move_tail(&iter->list, &del_list); 1424 break; 1425 } 1426 } 1427 } 1428 mutex_unlock(&mcam->lock); 1429 1430 list_for_each_entry_safe(iter, tmp, &del_list, list) { 1431 u16 entry = iter->entry; 1432 1433 /* clear the mcam entry target pcifunc */ 1434 mcam->entry2target_pffunc[entry] = 0x0; 1435 if (npc_delete_flow(rvu, iter, pcifunc)) 1436 dev_err(rvu->dev, "rule deletion failed for entry:%u", 1437 entry); 1438 } 1439 1440 return 0; 1441 } 1442 1443 static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr, 1444 struct rvu_npc_mcam_rule *rule, 1445 struct rvu_pfvf *pfvf) 1446 { 1447 struct npc_mcam_write_entry_req write_req = { 0 }; 1448 struct mcam_entry *entry = &write_req.entry_data; 1449 struct npc_mcam *mcam = &rvu->hw->mcam; 1450 struct msg_rsp rsp; 1451 u8 intf, enable; 1452 int err; 1453 1454 ether_addr_copy(rule->packet.dmac, pfvf->mac_addr); 1455 1456 npc_read_mcam_entry(rvu, mcam, npcblkaddr, rule->entry, 1457 entry, &intf, &enable); 1458 1459 npc_update_entry(rvu, NPC_DMAC, entry, 1460 ether_addr_to_u64(pfvf->mac_addr), 0, 1461 0xffffffffffffull, 0, intf); 1462 1463 write_req.hdr.pcifunc = rule->owner; 1464 write_req.entry = rule->entry; 1465 write_req.intf = pfvf->nix_rx_intf; 1466 1467 mutex_unlock(&mcam->lock); 1468 err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp); 1469 mutex_lock(&mcam->lock); 1470 1471 return err; 1472 } 1473 1474 void npc_mcam_enable_flows(struct rvu *rvu, u16 target) 1475 { 1476 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, target); 1477 struct rvu_npc_mcam_rule *def_ucast_rule; 1478 struct npc_mcam *mcam = &rvu->hw->mcam; 1479 struct rvu_npc_mcam_rule *rule; 1480 int blkaddr, bank, index; 1481 u64 def_action; 1482 1483 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1484 if (blkaddr < 0) 1485 return; 1486 1487 def_ucast_rule = pfvf->def_ucast_rule; 1488 1489 mutex_lock(&mcam->lock); 1490 list_for_each_entry(rule, &mcam->mcam_rules, list) { 1491 if (is_npc_intf_rx(rule->intf) && 1492 rule->rx_action.pf_func == target && !rule->enable) { 1493 if (rule->default_rule) { 1494 npc_enable_mcam_entry(rvu, mcam, blkaddr, 1495 rule->entry, true); 1496 rule->enable = true; 1497 continue; 1498 } 1499 1500 if (rule->vfvlan_cfg) 1501 npc_update_dmac_value(rvu, blkaddr, rule, pfvf); 1502 1503 if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) { 1504 if (!def_ucast_rule) 1505 continue; 1506 /* Use default unicast entry action */ 1507 rule->rx_action = def_ucast_rule->rx_action; 1508 def_action = *(u64 *)&def_ucast_rule->rx_action; 1509 bank = npc_get_bank(mcam, rule->entry); 1510 rvu_write64(rvu, blkaddr, 1511 NPC_AF_MCAMEX_BANKX_ACTION 1512 (rule->entry, bank), def_action); 1513 } 1514 1515 npc_enable_mcam_entry(rvu, mcam, blkaddr, 1516 rule->entry, true); 1517 rule->enable = true; 1518 } 1519 } 1520 1521 /* Enable MCAM entries installed by PF with target as VF pcifunc */ 1522 for (index = 0; index < mcam->bmap_entries; index++) { 1523 if (mcam->entry2target_pffunc[index] == target) 1524 npc_enable_mcam_entry(rvu, mcam, blkaddr, 1525 index, true); 1526 } 1527 mutex_unlock(&mcam->lock); 1528 } 1529 1530 void npc_mcam_disable_flows(struct rvu *rvu, u16 target) 1531 { 1532 struct npc_mcam *mcam = &rvu->hw->mcam; 1533 int blkaddr, index; 1534 1535 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1536 if (blkaddr < 0) 1537 return; 1538 1539 mutex_lock(&mcam->lock); 1540 /* Disable MCAM entries installed by PF with target as VF pcifunc */ 1541 for (index = 0; index < mcam->bmap_entries; index++) { 1542 if (mcam->entry2target_pffunc[index] == target) 1543 npc_enable_mcam_entry(rvu, mcam, blkaddr, 1544 index, false); 1545 } 1546 mutex_unlock(&mcam->lock); 1547 } 1548 1549 /* single drop on non hit rule starting from 0th index. This an extension 1550 * to RPM mac filter to support more rules. 1551 */ 1552 int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx, 1553 u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask, 1554 u64 bcast_mcast_val, u64 bcast_mcast_mask) 1555 { 1556 struct npc_mcam_alloc_counter_req cntr_req = { 0 }; 1557 struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 }; 1558 struct npc_mcam_write_entry_req req = { 0 }; 1559 struct npc_mcam *mcam = &rvu->hw->mcam; 1560 struct rvu_npc_mcam_rule *rule; 1561 struct msg_rsp rsp; 1562 bool enabled; 1563 int blkaddr; 1564 int err; 1565 1566 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1567 if (blkaddr < 0) { 1568 dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); 1569 return -ENODEV; 1570 } 1571 1572 /* Bail out if no exact match support */ 1573 if (!rvu_npc_exact_has_match_table(rvu)) { 1574 dev_info(rvu->dev, "%s: No support for exact match feature\n", __func__); 1575 return -EINVAL; 1576 } 1577 1578 /* If 0th entry is already used, return err */ 1579 enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_idx); 1580 if (enabled) { 1581 dev_err(rvu->dev, "%s: failed to add single drop on non hit rule at %d th index\n", 1582 __func__, mcam_idx); 1583 return -EINVAL; 1584 } 1585 1586 /* Add this entry to mcam rules list */ 1587 rule = kzalloc(sizeof(*rule), GFP_KERNEL); 1588 if (!rule) 1589 return -ENOMEM; 1590 1591 /* Disable rule by default. Enable rule when first dmac filter is 1592 * installed 1593 */ 1594 rule->enable = false; 1595 rule->chan = chan_val; 1596 rule->chan_mask = chan_mask; 1597 rule->entry = mcam_idx; 1598 rvu_mcam_add_rule(mcam, rule); 1599 1600 /* Reserve slot 0 */ 1601 npc_mcam_rsrcs_reserve(rvu, blkaddr, mcam_idx); 1602 1603 /* Allocate counter for this single drop on non hit rule */ 1604 cntr_req.hdr.pcifunc = 0; /* AF request */ 1605 cntr_req.contig = true; 1606 cntr_req.count = 1; 1607 err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp); 1608 if (err) { 1609 dev_err(rvu->dev, "%s: Err to allocate cntr for drop rule (err=%d)\n", 1610 __func__, err); 1611 return -EFAULT; 1612 } 1613 *counter_idx = cntr_rsp.cntr; 1614 1615 /* Fill in fields for this mcam entry */ 1616 npc_update_entry(rvu, NPC_EXACT_RESULT, &req.entry_data, exact_val, 0, 1617 exact_mask, 0, NIX_INTF_RX); 1618 npc_update_entry(rvu, NPC_CHAN, &req.entry_data, chan_val, 0, 1619 chan_mask, 0, NIX_INTF_RX); 1620 npc_update_entry(rvu, NPC_LXMB, &req.entry_data, bcast_mcast_val, 0, 1621 bcast_mcast_mask, 0, NIX_INTF_RX); 1622 1623 req.intf = NIX_INTF_RX; 1624 req.set_cntr = true; 1625 req.cntr = cntr_rsp.cntr; 1626 req.entry = mcam_idx; 1627 1628 err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &req, &rsp); 1629 if (err) { 1630 dev_err(rvu->dev, "%s: Installation of single drop on non hit rule at %d failed\n", 1631 __func__, mcam_idx); 1632 return err; 1633 } 1634 1635 dev_err(rvu->dev, "%s: Installed single drop on non hit rule at %d, cntr=%d\n", 1636 __func__, mcam_idx, req.cntr); 1637 1638 /* disable entry at Bank 0, index 0 */ 1639 npc_enable_mcam_entry(rvu, mcam, blkaddr, mcam_idx, false); 1640 1641 return 0; 1642 } 1643 1644 int rvu_mbox_handler_npc_get_field_status(struct rvu *rvu, 1645 struct npc_get_field_status_req *req, 1646 struct npc_get_field_status_rsp *rsp) 1647 { 1648 int blkaddr; 1649 1650 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1651 if (blkaddr < 0) 1652 return NPC_MCAM_INVALID_REQ; 1653 1654 if (!is_npc_interface_valid(rvu, req->intf)) 1655 return NPC_FLOW_INTF_INVALID; 1656 1657 if (npc_check_field(rvu, blkaddr, req->field, req->intf)) 1658 rsp->enable = 1; 1659 1660 return 0; 1661 } 1662