// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/bitfield.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"

#define NPC_BYTESM	GENMASK_ULL(19, 16)
#define NPC_HDR_OFFSET	GENMASK_ULL(15, 8)
#define NPC_KEY_OFFSET	GENMASK_ULL(5, 0)
#define NPC_LDATA_EN	BIT_ULL(7)

static const char * const npc_flow_names[] = {
	[NPC_DMAC]	= "dmac",
	[NPC_SMAC]	= "smac",
	[NPC_ETYPE]	= "ether type",
	[NPC_OUTER_VID]	= "outer vlan id",
	[NPC_TOS]	= "tos",
	[NPC_SIP_IPV4]	= "ipv4 source ip",
	[NPC_DIP_IPV4]	= "ipv4 destination ip",
	[NPC_SIP_IPV6]	= "ipv6 source ip",
	[NPC_DIP_IPV6]	= "ipv6 destination ip",
	[NPC_SPORT_TCP]	= "tcp source port",
	[NPC_DPORT_TCP]	= "tcp destination port",
	[NPC_SPORT_UDP]	= "udp source port",
	[NPC_DPORT_UDP]	= "udp destination port",
	[NPC_SPORT_SCTP] = "sctp source port",
	[NPC_DPORT_SCTP] = "sctp destination port",
	[NPC_UNKNOWN]	= "unknown",
};

const char *npc_get_field_name(u8 hdr)
{
	if (hdr >= ARRAY_SIZE(npc_flow_names))
		return npc_flow_names[NPC_UNKNOWN];

	return npc_flow_names[hdr];
}

/* Compute keyword masks and figure out the number of keywords a field
 * spans in the key.
 */
static void npc_set_kw_masks(struct npc_mcam *mcam, u8 type,
			     u8 nr_bits, int start_kwi, int offset, u8 intf)
{
	struct npc_key_field *field = &mcam->rx_key_fields[type];
	u8 bits_in_kw;
	int max_kwi;

	if (mcam->banks_per_entry == 1)
		max_kwi = 1; /* NPC_MCAM_KEY_X1 */
	else if (mcam->banks_per_entry == 2)
		max_kwi = 3; /* NPC_MCAM_KEY_X2 */
	else
		max_kwi = 6; /* NPC_MCAM_KEY_X4 */

	if (is_npc_intf_tx(intf))
		field = &mcam->tx_key_fields[type];

	if (offset + nr_bits <= 64) {
		/* one KW only */
		if (start_kwi > max_kwi)
			return;
		field->kw_mask[start_kwi] |= GENMASK_ULL(nr_bits - 1, 0)
					     << offset;
		field->nr_kws = 1;
	} else if (offset + nr_bits > 64 &&
		   offset + nr_bits <= 128) {
		/* two KWs */
		if (start_kwi + 1 > max_kwi)
			return;
		/* first KW mask */
		bits_in_kw = 64 - offset;
		field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
					     << offset;
		/* second KW mask i.e. mask for rest of bits */
		bits_in_kw = nr_bits + offset - 64;
		field->kw_mask[start_kwi + 1] |= GENMASK_ULL(bits_in_kw - 1, 0);
		field->nr_kws = 2;
	} else {
		/* three KWs */
		if (start_kwi + 2 > max_kwi)
			return;
		/* first KW mask */
		bits_in_kw = 64 - offset;
		field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
					     << offset;
		/* second KW mask */
		field->kw_mask[start_kwi + 1] = ~0ULL;
		/* third KW mask i.e. mask for rest of bits */
		bits_in_kw = nr_bits + offset - 128;
		field->kw_mask[start_kwi + 2] |= GENMASK_ULL(bits_in_kw - 1, 0);
		field->nr_kws = 3;
	}
}

/* Helper function to figure out whether field exists in the key */
static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct npc_key_field *input;

	input = &mcam->rx_key_fields[type];
	if (is_npc_intf_tx(intf))
		input = &mcam->tx_key_fields[type];

	return input->nr_kws > 0;
}

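/* Check whether two fields were extracted from the same layer data,
 * i.e. their layer metadata (lid, ltype, header/key offsets and
 * length) matches exactly.
 */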
static bool npc_is_same(struct npc_key_field *input,
			struct npc_key_field *field)
{
	int ret;

	ret = memcmp(&input->layer_mdata, &field->layer_mdata,
		     sizeof(struct npc_layer_mdata));
	return ret == 0;
}

static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type,
				u64 cfg, u8 lid, u8 lt, u8 intf)
{
	struct npc_key_field *input = &mcam->rx_key_fields[type];

	if (is_npc_intf_tx(intf))
		input = &mcam->tx_key_fields[type];

	input->layer_mdata.hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
	input->layer_mdata.key = FIELD_GET(NPC_KEY_OFFSET, cfg);
	input->layer_mdata.len = FIELD_GET(NPC_BYTESM, cfg) + 1;
	input->layer_mdata.ltype = lt;
	input->layer_mdata.lid = lid;
}

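/* Check whether the keyword masks of two extracted fields overlap
 * anywhere in the key.
 */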
static bool npc_check_overlap_fields(struct npc_key_field *input1,
				     struct npc_key_field *input2)
{
	int kwi;

	/* Fields with the same layer id but different ltypes are mutually
	 * exclusive, hence they are allowed to overlap in the key.
	 */
	if (input1->layer_mdata.lid == input2->layer_mdata.lid &&
	    input1->layer_mdata.ltype != input2->layer_mdata.ltype)
		return false;

	for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) {
		if (input1->kw_mask[kwi] & input2->kw_mask[kwi])
			return true;
	}

	return false;
}

/* Helper function to check whether the given field overlaps with any other
 * field in the key. Due to limitations on key size and the key extraction
 * profile in use, higher layers can overwrite a lower layer's header fields,
 * hence overlap needs to be checked.
 */
static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
			      enum key_fields type, u8 start_lid, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct npc_key_field *dummy, *input;
	int start_kwi, offset;
	u8 nr_bits, lid, lt, ld;
	u64 cfg;

	dummy = &mcam->rx_key_fields[NPC_UNKNOWN];
	input = &mcam->rx_key_fields[type];

	if (is_npc_intf_tx(intf)) {
		dummy = &mcam->tx_key_fields[NPC_UNKNOWN];
		input = &mcam->tx_key_fields[type];
	}

	for (lid = start_lid; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				cfg = rvu_read64(rvu, blkaddr,
						 NPC_AF_INTFX_LIDX_LTX_LDX_CFG
						 (intf, lid, lt, ld));
				if (!FIELD_GET(NPC_LDATA_EN, cfg))
					continue;
				memset(dummy, 0, sizeof(struct npc_key_field));
				npc_set_layer_mdata(mcam, NPC_UNKNOWN, cfg,
						    lid, lt, intf);
				/* exclude input */
				if (npc_is_same(input, dummy))
					continue;
				start_kwi = dummy->layer_mdata.key / 8;
				offset = (dummy->layer_mdata.key * 8) % 64;
				nr_bits = dummy->layer_mdata.len * 8;
				/* form KW masks */
				npc_set_kw_masks(mcam, NPC_UNKNOWN, nr_bits,
						 start_kwi, offset, intf);
				/* check if any of the input field's bits
				 * fall within any other field's bits
				 */
				if (npc_check_overlap_fields(dummy, input))
					return true;
			}
		}
	}

	return false;
}

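/* A header field is usable for flow matching only if the KEX profile
 * extracts it into the key and no other layer's data overwrites it
 * there.
 */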
static int npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
			   u8 intf)
{
	if (!npc_is_field_present(rvu, type, intf) ||
	    npc_check_overlap(rvu, blkaddr, type, 0, intf))
		return -EOPNOTSUPP;
	return 0;
}

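/* Record which part of the key a parse-result nibble occupies. Nibbles
 * enabled in PARSE_NIBBLE_ENA are packed into the key in bit order, so
 * key_nibble is the nibble's position within the key, not within the
 * parse result. For example, if only nibbles 0-2 (channel) and 9
 * (LA LTYPE) are enabled, the LA LTYPE lands in key nibble 3.
 */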
static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
				  u8 key_nibble, u8 intf)
{
	u8 offset = (key_nibble * 4) % 64; /* offset within key word */
	u8 kwi = (key_nibble * 4) / 64; /* which word in key */
	u8 nr_bits = 4; /* bits in a nibble */
	u8 type;

	switch (bit_number) {
	case 0 ... 2:
		type = NPC_CHAN;
		break;
	case 3:
		type = NPC_ERRLEV;
		break;
	case 4 ... 5:
		type = NPC_ERRCODE;
		break;
	case 6:
		type = NPC_LXMB;
		break;
	/* check for LTYPE only as of now */
	case 9:
		type = NPC_LA;
		break;
	case 12:
		type = NPC_LB;
		break;
	case 15:
		type = NPC_LC;
		break;
	case 18:
		type = NPC_LD;
		break;
	case 21:
		type = NPC_LE;
		break;
	case 24:
		type = NPC_LF;
		break;
	case 27:
		type = NPC_LG;
		break;
	case 30:
		type = NPC_LH;
		break;
	default:
		return;
	}
	npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
}

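/* Ethertype and outer VLAN TCI can be extracted from more than one
 * layer. Promote them to the generic NPC_ETYPE/NPC_OUTER_VID fields
 * only when a single layer extracts them or all extracting layers
 * place them at the same key position.
 */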
static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct npc_key_field *key_fields;
	/* Ether type can come from three layers
	 * (ethernet, single tagged, double tagged)
	 */
	struct npc_key_field *etype_ether;
	struct npc_key_field *etype_tag1;
	struct npc_key_field *etype_tag2;
	/* Outer VLAN TCI can come from two layers
	 * (single tagged, double tagged)
	 */
	struct npc_key_field *vlan_tag1;
	struct npc_key_field *vlan_tag2;
	u64 *features;
	u8 start_lid;
	int i;

	key_fields = mcam->rx_key_fields;
	features = &mcam->rx_features;

	if (is_npc_intf_tx(intf)) {
		key_fields = mcam->tx_key_fields;
		features = &mcam->tx_features;
	}

	/* Handle header fields which can come from multiple layers like
	 * etype, outer vlan tci. These fields should have the same position
	 * in the key; otherwise, installing a single MCAM rule would need
	 * more than one entry, which complicates MCAM space management.
	 */
	etype_ether = &key_fields[NPC_ETYPE_ETHER];
	etype_tag1 = &key_fields[NPC_ETYPE_TAG1];
	etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
	vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
	vlan_tag2 = &key_fields[NPC_VLAN_TAG2];

	/* if key profile programmed does not extract Ethertype at all */
	if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
		goto vlan_tci;

	/* if key profile programmed extracts Ethertype from one layer */
	if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
		key_fields[NPC_ETYPE] = *etype_ether;
	if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws)
		key_fields[NPC_ETYPE] = *etype_tag1;
	if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws)
		key_fields[NPC_ETYPE] = *etype_tag2;

	/* if key profile programmed extracts Ethertype from multiple layers */
	if (etype_ether->nr_kws && etype_tag1->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i])
				goto vlan_tci;
		}
		key_fields[NPC_ETYPE] = *etype_tag1;
	}
	if (etype_ether->nr_kws && etype_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i])
				goto vlan_tci;
		}
		key_fields[NPC_ETYPE] = *etype_tag2;
	}
	if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i])
				goto vlan_tci;
		}
		key_fields[NPC_ETYPE] = *etype_tag2;
	}

	/* check that no higher layer overwrites Ethertype */
	start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
	if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf))
		goto vlan_tci;
	*features |= BIT_ULL(NPC_ETYPE);
vlan_tci:
	/* if key profile does not extract outer vlan tci at all */
	if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
		goto done;

	/* if key profile extracts outer vlan tci from one layer */
	if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
		key_fields[NPC_OUTER_VID] = *vlan_tag1;
	if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws)
		key_fields[NPC_OUTER_VID] = *vlan_tag2;

	/* if key profile extracts outer vlan tci from multiple layers */
	if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i])
				goto done;
		}
		key_fields[NPC_OUTER_VID] = *vlan_tag2;
	}
	/* check that no higher layer overwrites outer vlan tci */
	start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
	if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf))
		goto done;
	*features |= BIT_ULL(NPC_OUTER_VID);
done:
	return;
}

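/* Scan a single layer-data extractor config (LID, LTYPE, LD) and, for
 * each packet header field of interest it covers, record where that
 * field lands in the key.
 */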
static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
			   u8 lt, u64 cfg, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u8 hdr, key, nr_bytes, bit_offset;
	u8 la_ltype, la_start;
	/* starting KW index and starting bit position */
	int start_kwi, offset;

	nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
	hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
	key = FIELD_GET(NPC_KEY_OFFSET, cfg);
	start_kwi = key / 8;
	offset = (key * 8) % 64;

	/* For Tx, Layer A has NIX_INST_HDR_S (8 bytes) preceding
	 * the ethernet header.
	 */
	if (is_npc_intf_tx(intf)) {
		la_ltype = NPC_LT_LA_IH_NIX_ETHER;
		la_start = 8;
	} else {
		la_ltype = NPC_LT_LA_ETHER;
		la_start = 0;
	}

#define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen)			       \
do {									       \
	if (lid == (hlid) && lt == (hlt)) {				       \
		if ((hstart) >= hdr &&					       \
		    ((hstart) + (hlen)) <= (hdr + nr_bytes)) {		       \
			bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
			npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \
			npc_set_kw_masks(mcam, (name), (hlen) * 8,	       \
					 start_kwi, offset + bit_offset, intf);\
		}							       \
	}								       \
} while (0)

	/* List LID, LTYPE, start offset from layer and length (in bytes) of
	 * packet header fields below.
	 * Example: Source IP is 4 bytes and starts at 12th byte of IP header
	 */
	NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
	NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
	NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
	NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
	NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
	NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
	NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
	NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
	NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2);
	NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
	NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
	NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
	NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
	NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
	NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
	NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
	NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6);
	/* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
	NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
}

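/* Walk all known header fields and record, per interface, which of
 * them the programmed KEX profile makes usable for flow matching.
 */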
static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u64 *features = &mcam->rx_features;
	u64 tcp_udp_sctp;
	int err, hdr;

	if (is_npc_intf_tx(intf))
		features = &mcam->tx_features;

	for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
		err = npc_check_field(rvu, blkaddr, hdr, intf);
		if (!err)
			*features |= BIT_ULL(hdr);
	}

	tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
		       BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
		       BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);

	/* for tcp/udp/sctp corresponding layer type should be in the key */
	if (*features & tcp_udp_sctp)
		if (npc_check_field(rvu, blkaddr, NPC_LD, intf))
			*features &= ~tcp_udp_sctp;

	/* for vlan corresponding layer type should be in the key */
	if (*features & BIT_ULL(NPC_OUTER_VID))
		if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
			*features &= ~BIT_ULL(NPC_OUTER_VID);
}

/* Scan the key extraction profile and record how fields of our interest
 * fill the key structure. Also verify that the Channel and DMAC exist in
 * the key and are not overwritten by other header fields.
 */
static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u8 lid, lt, ld, bitnr;
	u8 key_nibble = 0;
	u64 cfg;

	/* Scan and note how the parse result is going to be laid out in
	 * the key. A bit set in PARSE_NIBBLE_ENA corresponds to a nibble
	 * of the parse result in the key. The enabled nibbles from the
	 * parse result are concatenated into the key.
	 */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
	cfg &= NPC_PARSE_NIBBLE;
	for_each_set_bit(bitnr, (unsigned long *)&cfg, 31) {
		npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
		key_nibble++;
	}

	/* Scan and note how layer data is going to be in key */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				cfg = rvu_read64(rvu, blkaddr,
						 NPC_AF_INTFX_LIDX_LTX_LDX_CFG
						 (intf, lid, lt, ld));
				if (!FIELD_GET(NPC_LDATA_EN, cfg))
					continue;
				npc_scan_ldata(rvu, blkaddr, lid, lt, cfg,
					       intf);
			}
		}
	}

	return 0;
}

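/* Scan the KEX profile of both interfaces and verify the mandatory RX
 * fields: the channel must be present in the key, DMAC is needed for
 * unicast filtering, and neither may be overwritten by other header
 * fields.
 */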
static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
{
	int err;

	err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX);
	if (err)
		return err;

	err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX);
	if (err)
		return err;

	/* Channel is mandatory */
	if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) {
		dev_err(rvu->dev, "Channel not present in Key\n");
		return -EINVAL;
	}
	/* check that none of the fields overwrite channel */
	if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) {
		dev_err(rvu->dev, "Channel cannot be overwritten\n");
		return -EINVAL;
	}
	/* DMAC should be present in key for unicast filter to work */
	if (!npc_is_field_present(rvu, NPC_DMAC, NIX_INTF_RX)) {
		dev_err(rvu->dev, "DMAC not present in Key\n");
		return -EINVAL;
	}
	/* check that none of the fields overwrite DMAC */
	if (npc_check_overlap(rvu, blkaddr, NPC_DMAC, 0, NIX_INTF_RX)) {
		dev_err(rvu->dev, "DMAC cannot be overwritten\n");
		return -EINVAL;
	}

	npc_set_features(rvu, blkaddr, NIX_INTF_TX);
	npc_set_features(rvu, blkaddr, NIX_INTF_RX);
	npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_TX);
	npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_RX);

	return 0;
}

int npc_flow_steering_init(struct rvu *rvu, int blkaddr)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;

	INIT_LIST_HEAD(&mcam->mcam_rules);

	return npc_scan_verify_kex(rvu, blkaddr);
}

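/* Reject a request that asks to match on fields which the scanned KEX
 * profile cannot provide on this interface, and log which ones.
 */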
static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u64 *mcam_features = &mcam->rx_features;
	u64 unsupported;
	u8 bit;

	if (is_npc_intf_tx(intf))
		mcam_features = &mcam->tx_features;

	unsupported = (*mcam_features ^ features) & ~(*mcam_features);
	if (unsupported) {
		dev_info(rvu->dev, "Unsupported flow(s):\n");
		for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
			dev_info(rvu->dev, "%s ", npc_get_field_name(bit));
		return -EOPNOTSUPP;
	}

	return 0;
}

/* npc_update_entry - Based on the masks generated during
 * the key scanning, updates the given entry with value and
 * masks for the field of interest. A maximum of 16 bytes of
 * a packet header can be extracted by HW, hence lo and hi
 * are sufficient. When the field is 8 bytes or less, hi
 * should be 0 for both value and mask.
 *
 * If an exact match of the value is required then the mask
 * should be all 1's. If any bits in the mask are 0 then the
 * corresponding bits in the value are don't care.
 */
static void npc_update_entry(struct rvu *rvu, enum key_fields type,
			     struct mcam_entry *entry, u64 val_lo,
			     u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct mcam_entry dummy = { {0} };
	struct npc_key_field *field;
	u64 kw1, kw2, kw3;
	u8 shift;
	int i;

	field = &mcam->rx_key_fields[type];
	if (is_npc_intf_tx(intf))
		field = &mcam->tx_key_fields[type];

	if (!field->nr_kws)
		return;

	for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
		if (!field->kw_mask[i])
			continue;
		/* place key value in kw[x] */
		shift = __ffs64(field->kw_mask[i]);
		/* update entry value */
		kw1 = (val_lo << shift) & field->kw_mask[i];
		dummy.kw[i] = kw1;
		/* update entry mask */
		kw1 = (mask_lo << shift) & field->kw_mask[i];
		dummy.kw_mask[i] = kw1;

		if (field->nr_kws == 1)
			break;
		/* place remaining bits of key value in kw[x + 1] */
		if (field->nr_kws == 2) {
			/* update entry value */
			kw2 = shift ? val_lo >> (64 - shift) : 0;
			kw2 |= (val_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			dummy.kw[i + 1] = kw2;
			/* update entry mask */
			kw2 = shift ? mask_lo >> (64 - shift) : 0;
			kw2 |= (mask_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			dummy.kw_mask[i + 1] = kw2;
			break;
		}
		/* place remaining bits of key value in kw[x + 1], kw[x + 2] */
		if (field->nr_kws == 3) {
			/* update entry value */
			kw2 = shift ? val_lo >> (64 - shift) : 0;
			kw2 |= (val_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			kw3 = shift ? val_hi >> (64 - shift) : 0;
			kw3 &= field->kw_mask[i + 2];
			dummy.kw[i + 1] = kw2;
			dummy.kw[i + 2] = kw3;
			/* update entry mask */
			kw2 = shift ? mask_lo >> (64 - shift) : 0;
			kw2 |= (mask_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			kw3 = shift ? mask_hi >> (64 - shift) : 0;
			kw3 &= field->kw_mask[i + 2];
			dummy.kw_mask[i + 1] = kw2;
			dummy.kw_mask[i + 2] = kw3;
			break;
		}
	}
	/* dummy is ready with values and masks for the given key
	 * field; now clear and update the input entry with those
	 */
	for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
		if (!field->kw_mask[i])
			continue;
		entry->kw[i] &= ~field->kw_mask[i];
		entry->kw_mask[i] &= ~field->kw_mask[i];

		entry->kw[i] |= dummy.kw[i];
		entry->kw_mask[i] |= dummy.kw_mask[i];
	}
}

#define IPV6_WORDS	4

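/* An IPv6 address spans 128 bits, so its value and mask are passed to
 * npc_update_entry() as hi/lo 64-bit halves.
 */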
static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry,
				 u64 features, struct flow_msg *pkt,
				 struct flow_msg *mask,
				 struct rvu_npc_mcam_rule *output, u8 intf)
{
	u32 src_ip[IPV6_WORDS], src_ip_mask[IPV6_WORDS];
	u32 dst_ip[IPV6_WORDS], dst_ip_mask[IPV6_WORDS];
	struct flow_msg *opkt = &output->packet;
	struct flow_msg *omask = &output->mask;
	u64 mask_lo, mask_hi;
	u64 val_lo, val_hi;

	/* For an ipv6 address fe80::2c68:63ff:fe5e:2d0a the packet
	 * values to be programmed in MCAM should be as below:
	 * val_high: 0xfe80000000000000
	 * val_low: 0x2c6863fffe5e2d0a
	 */
	if (features & BIT_ULL(NPC_SIP_IPV6)) {
		be32_to_cpu_array(src_ip_mask, mask->ip6src, IPV6_WORDS);
		be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);

		mask_hi = (u64)src_ip_mask[0] << 32 | src_ip_mask[1];
		mask_lo = (u64)src_ip_mask[2] << 32 | src_ip_mask[3];
		val_hi = (u64)src_ip[0] << 32 | src_ip[1];
		val_lo = (u64)src_ip[2] << 32 | src_ip[3];

		npc_update_entry(rvu, NPC_SIP_IPV6, entry, val_lo, val_hi,
				 mask_lo, mask_hi, intf);
		memcpy(opkt->ip6src, pkt->ip6src, sizeof(opkt->ip6src));
		memcpy(omask->ip6src, mask->ip6src, sizeof(omask->ip6src));
	}
	if (features & BIT_ULL(NPC_DIP_IPV6)) {
		be32_to_cpu_array(dst_ip_mask, mask->ip6dst, IPV6_WORDS);
		be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);

		mask_hi = (u64)dst_ip_mask[0] << 32 | dst_ip_mask[1];
		mask_lo = (u64)dst_ip_mask[2] << 32 | dst_ip_mask[3];
		val_hi = (u64)dst_ip[0] << 32 | dst_ip[1];
		val_lo = (u64)dst_ip[2] << 32 | dst_ip[3];

		npc_update_entry(rvu, NPC_DIP_IPV6, entry, val_lo, val_hi,
				 mask_lo, mask_hi, intf);
		memcpy(opkt->ip6dst, pkt->ip6dst, sizeof(opkt->ip6dst));
		memcpy(omask->ip6dst, mask->ip6dst, sizeof(omask->ip6dst));
	}
}

static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
			    u64 features, struct flow_msg *pkt,
			    struct flow_msg *mask,
			    struct rvu_npc_mcam_rule *output, u8 intf)
{
	u64 dmac_mask = ether_addr_to_u64(mask->dmac);
	u64 smac_mask = ether_addr_to_u64(mask->smac);
	u64 dmac_val = ether_addr_to_u64(pkt->dmac);
	u64 smac_val = ether_addr_to_u64(pkt->smac);
	struct flow_msg *opkt = &output->packet;
	struct flow_msg *omask = &output->mask;

	if (!features)
		return;

	/* For tcp/udp/sctp the corresponding LTYPE should be present in
	 * the entry
	 */
	if (features & (BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_DPORT_TCP)))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
				 0, ~0ULL, 0, intf);
	if (features & (BIT_ULL(NPC_SPORT_UDP) | BIT_ULL(NPC_DPORT_UDP)))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
				 0, ~0ULL, 0, intf);
	if (features & (BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP)))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
				 0, ~0ULL, 0, intf);

	if (features & BIT_ULL(NPC_OUTER_VID))
		npc_update_entry(rvu, NPC_LB, entry,
				 NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
				 NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);

#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi)	      \
do {									      \
	if (features & BIT_ULL((field))) {				      \
		npc_update_entry(rvu, (field), entry, (val_lo), (val_hi),     \
				 (mask_lo), (mask_hi), intf);		      \
		memcpy(&opkt->member, &pkt->member, sizeof(pkt->member));     \
		memcpy(&omask->member, &mask->member, sizeof(mask->member));  \
	}								      \
} while (0)

	NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);
	NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
	NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
		       ntohs(mask->etype), 0);
	NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
		       ntohl(mask->ip4src), 0);
	NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
		       ntohl(mask->ip4dst), 0);
	NPC_WRITE_FLOW(NPC_SPORT_TCP, sport, ntohs(pkt->sport), 0,
		       ntohs(mask->sport), 0);
	NPC_WRITE_FLOW(NPC_SPORT_UDP, sport, ntohs(pkt->sport), 0,
		       ntohs(mask->sport), 0);
	NPC_WRITE_FLOW(NPC_DPORT_TCP, dport, ntohs(pkt->dport), 0,
		       ntohs(mask->dport), 0);
	NPC_WRITE_FLOW(NPC_DPORT_UDP, dport, ntohs(pkt->dport), 0,
		       ntohs(mask->dport), 0);
	NPC_WRITE_FLOW(NPC_SPORT_SCTP, sport, ntohs(pkt->sport), 0,
		       ntohs(mask->sport), 0);
	NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
		       ntohs(mask->dport), 0);

	NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
		       ntohs(mask->vlan_tci), 0);

	npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
}

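/* Find the software rule, if any, tracking the given MCAM entry index.
 * Takes and releases the mcam lock around the list walk.
 */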
static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam,
						    u16 entry)
{
	struct rvu_npc_mcam_rule *iter;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		if (iter->entry == entry) {
			mutex_unlock(&mcam->lock);
			return iter;
		}
	}
	mutex_unlock(&mcam->lock);

	return NULL;
}

static void rvu_mcam_add_rule(struct npc_mcam *mcam,
			      struct rvu_npc_mcam_rule *rule)
{
	struct list_head *head = &mcam->mcam_rules;
	struct rvu_npc_mcam_rule *iter;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		if (iter->entry > rule->entry)
			break;
		head = &iter->list;
	}

	list_add(&rule->list, head);
	mutex_unlock(&mcam->lock);
}

static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
					      struct rvu_npc_mcam_rule *rule)
{
	struct npc_mcam_oper_counter_req free_req = { 0 };
	struct msg_rsp free_rsp;

	if (!rule->has_cntr)
		return;

	free_req.hdr.pcifunc = pcifunc;
	free_req.cntr = rule->cntr;

	rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp);
	rule->has_cntr = false;
}

static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
					 struct rvu_npc_mcam_rule *rule,
					 struct npc_install_flow_rsp *rsp)
{
	struct npc_mcam_alloc_counter_req cntr_req = { 0 };
	struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
	int err;

	cntr_req.hdr.pcifunc = pcifunc;
	cntr_req.contig = true;
	cntr_req.count = 1;

	/* we try to allocate a counter to track the stats of this
	 * rule. If a counter could not be allocated then proceed
	 * without one, because counters are scarcer than entries.
	 */
	err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req,
						      &cntr_rsp);
	if (!err && cntr_rsp.count) {
		rule->cntr = cntr_rsp.cntr;
		rule->has_cntr = true;
		rsp->counter = rule->cntr;
	} else {
		rsp->counter = err;
	}
}

static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct mcam_entry *entry,
				struct npc_install_flow_req *req, u16 target)
{
	struct nix_rx_action action;

	npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0,
			 ~0ULL, 0, NIX_INTF_RX);

	*(u64 *)&action = 0x00;
	action.pf_func = target;
	action.op = req->op;
	action.index = req->index;
	action.match_id = req->match_id;
	action.flow_key_alg = req->flow_key_alg;

	if (req->op == NIX_RX_ACTION_DEFAULT && pfvf->def_ucast_rule)
		action = pfvf->def_ucast_rule->rx_action;

	entry->action = *(u64 *)&action;

	/* VTAG0 starts at 0th byte of LID_B.
	 * VTAG1 starts at 4th byte of LID_B.
	 */
	entry->vtag_action = FIELD_PREP(RX_VTAG0_VALID_BIT, req->vtag0_valid) |
			     FIELD_PREP(RX_VTAG0_TYPE_MASK, req->vtag0_type) |
			     FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) |
			     FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0) |
			     FIELD_PREP(RX_VTAG1_VALID_BIT, req->vtag1_valid) |
			     FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
			     FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
			     FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
}

static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct mcam_entry *entry,
				struct npc_install_flow_req *req, u16 target)
{
	struct nix_tx_action action;

	npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
			 0, ~0ULL, 0, NIX_INTF_TX);

	*(u64 *)&action = 0x00;
	action.op = req->op;
	action.index = req->index;
	action.match_id = req->match_id;

	entry->action = *(u64 *)&action;

	/* VTAG0 starts at 0th byte of LID_B.
	 * VTAG1 starts at 4th byte of LID_B.
	 */
	entry->vtag_action = FIELD_PREP(TX_VTAG0_DEF_MASK, req->vtag0_def) |
			     FIELD_PREP(TX_VTAG0_OP_MASK, req->vtag0_op) |
			     FIELD_PREP(TX_VTAG0_LID_MASK, NPC_LID_LA) |
			     FIELD_PREP(TX_VTAG0_RELPTR_MASK, 20) |
			     FIELD_PREP(TX_VTAG1_DEF_MASK, req->vtag1_def) |
			     FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
			     FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
			     FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
}

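/* Build the MCAM entry for the request, merge in fields from the
 * default unicast rule where required, write the entry through the
 * mbox handler and track it in the software rule list, allocating a
 * match counter if one was requested.
 */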
static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
			    int nixlf, struct rvu_pfvf *pfvf,
			    struct npc_install_flow_req *req,
			    struct npc_install_flow_rsp *rsp, bool enable,
			    bool pf_set_vfs_mac)
{
	struct rvu_npc_mcam_rule *def_ucast_rule = pfvf->def_ucast_rule;
	u64 features, installed_features, missing_features = 0;
	struct npc_mcam_write_entry_req write_req = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule dummy = { 0 };
	struct rvu_npc_mcam_rule *rule;
	bool new = false, msg_from_vf;
	u16 owner = req->hdr.pcifunc;
	struct msg_rsp write_rsp;
	struct mcam_entry *entry;
	int entry_index, err;

	msg_from_vf = !!(owner & RVU_PFVF_FUNC_MASK);

	installed_features = req->features;
	features = req->features;
	entry = &write_req.entry_data;
	entry_index = req->entry;

	npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
			req->intf);

	if (is_npc_intf_rx(req->intf))
		npc_update_rx_entry(rvu, pfvf, entry, req, target);
	else
		npc_update_tx_entry(rvu, pfvf, entry, req, target);

	/* Default unicast rules do not exist for TX */
	if (is_npc_intf_tx(req->intf))
		goto find_rule;

	if (def_ucast_rule)
		missing_features = (def_ucast_rule->features ^ features) &
					def_ucast_rule->features;

	if (req->default_rule && req->append) {
		/* add to default rule */
		if (missing_features)
			npc_update_flow(rvu, entry, missing_features,
					&def_ucast_rule->packet,
					&def_ucast_rule->mask,
					&dummy, req->intf);
		enable = rvu_npc_write_default_rule(rvu, blkaddr,
						    nixlf, target,
						    pfvf->nix_rx_intf, entry,
						    &entry_index);
		installed_features = req->features | missing_features;
	} else if (req->default_rule && !req->append) {
		/* overwrite default rule */
		enable = rvu_npc_write_default_rule(rvu, blkaddr,
						    nixlf, target,
						    pfvf->nix_rx_intf, entry,
						    &entry_index);
	} else if (msg_from_vf) {
		/* normal rule - include default rule also to it for VF */
		npc_update_flow(rvu, entry, missing_features,
				&def_ucast_rule->packet, &def_ucast_rule->mask,
				&dummy, req->intf);
		installed_features = req->features | missing_features;
	}

find_rule:
	rule = rvu_mcam_find_rule(mcam, entry_index);
	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;
		new = true;
	}
	/* no counter for default rule */
	if (req->default_rule)
		goto update_rule;

	/* allocate new counter if rule has no counter */
	if (req->set_cntr && !rule->has_cntr)
		rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp);

	/* if user wants to delete an existing counter for a rule then
	 * free the counter
	 */
	if (!req->set_cntr && rule->has_cntr)
		rvu_mcam_remove_counter_from_rule(rvu, owner, rule);

	write_req.hdr.pcifunc = owner;
	write_req.entry = req->entry;
	write_req.intf = req->intf;
	write_req.enable_entry = (u8)enable;
	/* if counter is available then clear and use it */
	if (req->set_cntr && rule->has_cntr) {
		rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00);
		write_req.set_cntr = 1;
		write_req.cntr = rule->cntr;
	}

	err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
						    &write_rsp);
	if (err) {
		rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
		if (new)
			kfree(rule);
		return err;
	}
update_rule:
	memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
	memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
	rule->entry = entry_index;
	memcpy(&rule->rx_action, &entry->action, sizeof(struct nix_rx_action));
	if (is_npc_intf_tx(req->intf))
		memcpy(&rule->tx_action, &entry->action,
		       sizeof(struct nix_tx_action));
	rule->vtag_action = entry->vtag_action;
	rule->features = installed_features;
	rule->default_rule = req->default_rule;
	rule->owner = owner;
	rule->enable = enable;
	if (is_npc_intf_tx(req->intf))
		rule->intf = pfvf->nix_tx_intf;
	else
		rule->intf = pfvf->nix_rx_intf;

	if (new)
		rvu_mcam_add_rule(mcam, rule);
	if (req->default_rule)
		pfvf->def_ucast_rule = rule;

	/* VF's MAC address is being changed via PF */
	if (pf_set_vfs_mac) {
		ether_addr_copy(pfvf->default_mac, req->packet.dmac);
		ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
	}

	if (pfvf->pf_set_vf_cfg && req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
		rule->vfvlan_cfg = true;

	return 0;
}

int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
				      struct npc_install_flow_req *req,
				      struct npc_install_flow_rsp *rsp)
{
	bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;
	bool pf_set_vfs_mac = false;
	bool enable = true;
	u16 target;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return -ENODEV;
	}

	if (!is_npc_interface_valid(rvu, req->intf))
		return -EINVAL;

	if (from_vf && req->default_rule)
		return NPC_MCAM_PERM_DENIED;

	/* Each PF/VF's info is maintained in struct rvu_pfvf. The rvu_pfvf
	 * of the target PF/VF needs to be retrieved, hence modify pcifunc
	 * accordingly.
	 */

	/* AF installing for a PF/VF */
	if (!req->hdr.pcifunc)
		target = req->vf;
	/* PF installing for its VF */
	else if (!from_vf && req->vf) {
		target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
		pf_set_vfs_mac = req->default_rule &&
				 (req->features & BIT_ULL(NPC_DMAC));
	}
	/* msg received from PF/VF */
	else
		target = req->hdr.pcifunc;

	if (npc_check_unsupported_flows(rvu, req->features, req->intf))
		return -EOPNOTSUPP;

	if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, target);

	/* PF installing for its VF */
	if (req->hdr.pcifunc && !from_vf && req->vf)
		pfvf->pf_set_vf_cfg = 1;

	/* update req destination mac addr */
	if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) &&
	    is_zero_ether_addr(req->packet.dmac)) {
		ether_addr_copy(req->packet.dmac, pfvf->mac_addr);
		eth_broadcast_addr((u8 *)&req->mask.dmac);
	}

	err = nix_get_nixlf(rvu, target, &nixlf, NULL);

	/* If interface is uninitialized then do not enable entry */
	if (err || (!req->default_rule && !pfvf->def_ucast_rule))
		enable = false;

	/* Packets reaching NPC in the Tx path imply that a
	 * NIXLF is properly set up and transmitting.
	 * Hence rules can be enabled for Tx.
	 */
	if (is_npc_intf_tx(req->intf))
		enable = true;

	/* Do not allow requests from uninitialized VFs */
	if (from_vf && !enable)
		return -EINVAL;

	/* If message is from VF then its flow should not overlap with
	 * reserved unicast flow.
	 */
	if (from_vf && pfvf->def_ucast_rule && is_npc_intf_rx(req->intf) &&
	    pfvf->def_ucast_rule->features & req->features)
		return -EINVAL;

	return npc_install_flow(rvu, blkaddr, target, nixlf, pfvf, req, rsp,
				enable, pf_set_vfs_mac);
}

static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
			   u16 pcifunc)
{
	struct npc_mcam_ena_dis_entry_req dis_req = { 0 };
	struct msg_rsp dis_rsp;

	if (rule->default_rule)
		return 0;

	if (rule->has_cntr)
		rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);

	dis_req.hdr.pcifunc = pcifunc;
	dis_req.entry = rule->entry;

	list_del(&rule->list);
	kfree(rule);

	return rvu_mbox_handler_npc_mcam_dis_entry(rvu, &dis_req, &dis_rsp);
}

int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
				     struct npc_delete_flow_req *req,
				     struct msg_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *iter, *tmp;
	u16 pcifunc = req->hdr.pcifunc;
	struct list_head del_list;

	INIT_LIST_HEAD(&del_list);

	mutex_lock(&mcam->lock);
	list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) {
		if (iter->owner == pcifunc) {
			/* All rules */
			if (req->all) {
				list_move_tail(&iter->list, &del_list);
			/* Range of rules */
			} else if (req->end && iter->entry >= req->start &&
				   iter->entry <= req->end) {
				list_move_tail(&iter->list, &del_list);
			/* single rule */
			} else if (req->entry == iter->entry) {
				list_move_tail(&iter->list, &del_list);
				break;
			}
		}
	}
	mutex_unlock(&mcam->lock);

	list_for_each_entry_safe(iter, tmp, &del_list, list) {
		u16 entry = iter->entry;

		/* clear the mcam entry target pcifunc */
		mcam->entry2target_pffunc[entry] = 0x0;
		if (npc_delete_flow(rvu, iter, pcifunc))
			dev_err(rvu->dev, "rule deletion failed for entry:%u\n",
				entry);
	}

	return 0;
}

static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr,
				 struct rvu_npc_mcam_rule *rule,
				 struct rvu_pfvf *pfvf)
{
	struct npc_mcam_write_entry_req write_req = { 0 };
	struct mcam_entry *entry = &write_req.entry_data;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct msg_rsp rsp;
	u8 intf, enable;
	int err;

	ether_addr_copy(rule->packet.dmac, pfvf->mac_addr);

	npc_read_mcam_entry(rvu, mcam, npcblkaddr, rule->entry,
			    entry, &intf, &enable);

	npc_update_entry(rvu, NPC_DMAC, entry,
			 ether_addr_to_u64(pfvf->mac_addr), 0,
			 0xffffffffffffull, 0, intf);

	write_req.hdr.pcifunc = rule->owner;
	write_req.entry = rule->entry;

	mutex_unlock(&mcam->lock);
	err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp);
	mutex_lock(&mcam->lock);

	return err;
}

void npc_mcam_enable_flows(struct rvu *rvu, u16 target)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, target);
	struct rvu_npc_mcam_rule *def_ucast_rule;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule;
	int blkaddr, bank, index;
	u64 def_action;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	def_ucast_rule = pfvf->def_ucast_rule;

	mutex_lock(&mcam->lock);
	list_for_each_entry(rule, &mcam->mcam_rules, list) {
		if (is_npc_intf_rx(rule->intf) &&
		    rule->rx_action.pf_func == target && !rule->enable) {
			if (rule->default_rule) {
				npc_enable_mcam_entry(rvu, mcam, blkaddr,
						      rule->entry, true);
				rule->enable = true;
				continue;
			}

			if (rule->vfvlan_cfg)
				npc_update_dmac_value(rvu, blkaddr, rule, pfvf);

			if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) {
				if (!def_ucast_rule)
					continue;
				/* Use default unicast entry action */
				rule->rx_action = def_ucast_rule->rx_action;
				def_action = *(u64 *)&def_ucast_rule->rx_action;
				bank = npc_get_bank(mcam, rule->entry);
				rvu_write64(rvu, blkaddr,
					    NPC_AF_MCAMEX_BANKX_ACTION
					    (rule->entry, bank), def_action);
			}

			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      rule->entry, true);
			rule->enable = true;
		}
	}

	/* Enable MCAM entries installed by PF with target as VF pcifunc */
	for (index = 0; index < mcam->bmap_entries; index++) {
		if (mcam->entry2target_pffunc[index] == target)
			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      index, true);
	}
	mutex_unlock(&mcam->lock);
}

void npc_mcam_disable_flows(struct rvu *rvu, u16 target)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	mutex_lock(&mcam->lock);
	/* Disable MCAM entries installed by PF with target as VF pcifunc */
	for (index = 0; index < mcam->bmap_entries; index++) {
		if (mcam->entry2target_pffunc[index] == target)
			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      index, false);
	}
	mutex_unlock(&mcam->lock);
}