1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 2 /* Copyright 2019 NXP */ 3 4 #include "enetc.h" 5 6 #include <net/pkt_sched.h> 7 #include <linux/math64.h> 8 #include <linux/refcount.h> 9 #include <net/pkt_cls.h> 10 #include <net/tc_act/tc_gate.h> 11 12 static u16 enetc_get_max_gcl_len(struct enetc_hw *hw) 13 { 14 return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET) 15 & ENETC_QBV_MAX_GCL_LEN_MASK; 16 } 17 18 void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed) 19 { 20 u32 old_speed = priv->speed; 21 u32 pspeed; 22 23 if (speed == old_speed) 24 return; 25 26 switch (speed) { 27 case SPEED_1000: 28 pspeed = ENETC_PMR_PSPEED_1000M; 29 break; 30 case SPEED_2500: 31 pspeed = ENETC_PMR_PSPEED_2500M; 32 break; 33 case SPEED_100: 34 pspeed = ENETC_PMR_PSPEED_100M; 35 break; 36 case SPEED_10: 37 default: 38 pspeed = ENETC_PMR_PSPEED_10M; 39 } 40 41 priv->speed = speed; 42 enetc_port_wr(&priv->si->hw, ENETC_PMR, 43 (enetc_port_rd(&priv->si->hw, ENETC_PMR) 44 & (~ENETC_PMR_PSPEED_MASK)) 45 | pspeed); 46 } 47 48 static int enetc_setup_taprio(struct net_device *ndev, 49 struct tc_taprio_qopt_offload *admin_conf) 50 { 51 struct enetc_ndev_priv *priv = netdev_priv(ndev); 52 struct enetc_cbd cbd = {.cmd = 0}; 53 struct tgs_gcl_conf *gcl_config; 54 struct tgs_gcl_data *gcl_data; 55 struct gce *gce; 56 dma_addr_t dma; 57 u16 data_size; 58 u16 gcl_len; 59 u32 tge; 60 int err; 61 int i; 62 63 if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw)) 64 return -EINVAL; 65 gcl_len = admin_conf->num_entries; 66 67 tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET); 68 if (!admin_conf->enable) { 69 enetc_wr(&priv->si->hw, 70 ENETC_QBV_PTGCR_OFFSET, 71 tge & (~ENETC_QBV_TGE)); 72 return 0; 73 } 74 75 if (admin_conf->cycle_time > U32_MAX || 76 admin_conf->cycle_time_extension > U32_MAX) 77 return -EINVAL; 78 79 /* Configure the (administrative) gate control list using the 80 * control BD descriptor. 81 */ 82 gcl_config = &cbd.gcl_conf; 83 84 data_size = struct_size(gcl_data, entry, gcl_len); 85 gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); 86 if (!gcl_data) 87 return -ENOMEM; 88 89 gce = (struct gce *)(gcl_data + 1); 90 91 /* Set all gates open as default */ 92 gcl_config->atc = 0xff; 93 gcl_config->acl_len = cpu_to_le16(gcl_len); 94 95 if (!admin_conf->base_time) { 96 gcl_data->btl = 97 cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0)); 98 gcl_data->bth = 99 cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1)); 100 } else { 101 gcl_data->btl = 102 cpu_to_le32(lower_32_bits(admin_conf->base_time)); 103 gcl_data->bth = 104 cpu_to_le32(upper_32_bits(admin_conf->base_time)); 105 } 106 107 gcl_data->ct = cpu_to_le32(admin_conf->cycle_time); 108 gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension); 109 110 for (i = 0; i < gcl_len; i++) { 111 struct tc_taprio_sched_entry *temp_entry; 112 struct gce *temp_gce = gce + i; 113 114 temp_entry = &admin_conf->entries[i]; 115 116 temp_gce->gate = (u8)temp_entry->gate_mask; 117 temp_gce->period = cpu_to_le32(temp_entry->interval); 118 } 119 120 cbd.length = cpu_to_le16(data_size); 121 cbd.status_flags = 0; 122 123 dma = dma_map_single(&priv->si->pdev->dev, gcl_data, 124 data_size, DMA_TO_DEVICE); 125 if (dma_mapping_error(&priv->si->pdev->dev, dma)) { 126 netdev_err(priv->si->ndev, "DMA mapping failed!\n"); 127 kfree(gcl_data); 128 return -ENOMEM; 129 } 130 131 cbd.addr[0] = lower_32_bits(dma); 132 cbd.addr[1] = upper_32_bits(dma); 133 cbd.cls = BDCR_CMD_PORT_GCL; 134 cbd.status_flags = 0; 135 136 enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET, 137 tge | ENETC_QBV_TGE); 138 139 err = enetc_send_cmd(priv->si, &cbd); 140 if (err) 141 enetc_wr(&priv->si->hw, 142 ENETC_QBV_PTGCR_OFFSET, 143 tge & (~ENETC_QBV_TGE)); 144 145 dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE); 146 kfree(gcl_data); 147 148 return err; 149 } 150 151 int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data) 152 { 153 struct tc_taprio_qopt_offload *taprio = type_data; 154 struct enetc_ndev_priv *priv = netdev_priv(ndev); 155 int err; 156 int i; 157 158 /* TSD and Qbv are mutually exclusive in hardware */ 159 for (i = 0; i < priv->num_tx_rings; i++) 160 if (priv->tx_ring[i]->tsd_enable) 161 return -EBUSY; 162 163 for (i = 0; i < priv->num_tx_rings; i++) 164 enetc_set_bdr_prio(&priv->si->hw, 165 priv->tx_ring[i]->index, 166 taprio->enable ? i : 0); 167 168 err = enetc_setup_taprio(ndev, taprio); 169 170 if (err) 171 for (i = 0; i < priv->num_tx_rings; i++) 172 enetc_set_bdr_prio(&priv->si->hw, 173 priv->tx_ring[i]->index, 174 taprio->enable ? 0 : i); 175 176 return err; 177 } 178 179 static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc) 180 { 181 return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE; 182 } 183 184 static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc) 185 { 186 return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK; 187 } 188 189 int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) 190 { 191 struct enetc_ndev_priv *priv = netdev_priv(ndev); 192 struct tc_cbs_qopt_offload *cbs = type_data; 193 u32 port_transmit_rate = priv->speed; 194 u8 tc_nums = netdev_get_num_tc(ndev); 195 struct enetc_si *si = priv->si; 196 u32 hi_credit_bit, hi_credit_reg; 197 u32 max_interference_size; 198 u32 port_frame_max_size; 199 u8 tc = cbs->queue; 200 u8 prio_top, prio_next; 201 int bw_sum = 0; 202 u8 bw; 203 204 prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1); 205 prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2); 206 207 /* Support highest prio and second prio tc in cbs mode */ 208 if (tc != prio_top && tc != prio_next) 209 return -EOPNOTSUPP; 210 211 if (!cbs->enable) { 212 /* Make sure the other TC that are numerically 213 * lower than this TC have been disabled. 214 */ 215 if (tc == prio_top && 216 enetc_get_cbs_enable(&si->hw, prio_next)) { 217 dev_err(&ndev->dev, 218 "Disable TC%d before disable TC%d\n", 219 prio_next, tc); 220 return -EINVAL; 221 } 222 223 enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0); 224 enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0); 225 226 return 0; 227 } 228 229 if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L || 230 cbs->idleslope < 0 || cbs->sendslope > 0) 231 return -EOPNOTSUPP; 232 233 port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; 234 235 bw = cbs->idleslope / (port_transmit_rate * 10UL); 236 237 /* Make sure the other TC that are numerically 238 * higher than this TC have been enabled. 239 */ 240 if (tc == prio_next) { 241 if (!enetc_get_cbs_enable(&si->hw, prio_top)) { 242 dev_err(&ndev->dev, 243 "Enable TC%d first before enable TC%d\n", 244 prio_top, prio_next); 245 return -EINVAL; 246 } 247 bw_sum += enetc_get_cbs_bw(&si->hw, prio_top); 248 } 249 250 if (bw_sum + bw >= 100) { 251 dev_err(&ndev->dev, 252 "The sum of all CBS Bandwidth can't exceed 100\n"); 253 return -EINVAL; 254 } 255 256 enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc)); 257 258 /* For top prio TC, the max_interfrence_size is maxSizedFrame. 259 * 260 * For next prio TC, the max_interfrence_size is calculated as below: 261 * 262 * max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra) 263 * 264 * - RA: idleSlope for AVB Class A 265 * - R0: port transmit rate 266 * - M0: maximum sized frame for the port 267 * - MA: maximum sized frame for AVB Class A 268 */ 269 270 if (tc == prio_top) { 271 max_interference_size = port_frame_max_size * 8; 272 } else { 273 u32 m0, ma, r0, ra; 274 275 m0 = port_frame_max_size * 8; 276 ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8; 277 ra = enetc_get_cbs_bw(&si->hw, prio_top) * 278 port_transmit_rate * 10000ULL; 279 r0 = port_transmit_rate * 1000000ULL; 280 max_interference_size = m0 + ma + 281 (u32)div_u64((u64)ra * m0, r0 - ra); 282 } 283 284 /* hiCredit bits calculate by: 285 * 286 * maxSizedFrame * (idleSlope/portTxRate) 287 */ 288 hi_credit_bit = max_interference_size * bw / 100; 289 290 /* hiCredit bits to hiCredit register need to calculated as: 291 * 292 * (enetClockFrequency / portTransmitRate) * 100 293 */ 294 hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit, 295 port_transmit_rate * 1000000ULL); 296 297 enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg); 298 299 /* Set bw register and enable this traffic class */ 300 enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE); 301 302 return 0; 303 } 304 305 int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data) 306 { 307 struct enetc_ndev_priv *priv = netdev_priv(ndev); 308 struct tc_etf_qopt_offload *qopt = type_data; 309 u8 tc_nums = netdev_get_num_tc(ndev); 310 int tc; 311 312 if (!tc_nums) 313 return -EOPNOTSUPP; 314 315 tc = qopt->queue; 316 317 if (tc < 0 || tc >= priv->num_tx_rings) 318 return -EINVAL; 319 320 /* Do not support TXSTART and TX CSUM offload simutaniously */ 321 if (ndev->features & NETIF_F_CSUM_MASK) 322 return -EBUSY; 323 324 /* TSD and Qbv are mutually exclusive in hardware */ 325 if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE) 326 return -EBUSY; 327 328 priv->tx_ring[tc]->tsd_enable = qopt->enable; 329 enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc), 330 qopt->enable ? ENETC_TSDE : 0); 331 332 return 0; 333 } 334 335 enum streamid_type { 336 STREAMID_TYPE_RESERVED = 0, 337 STREAMID_TYPE_NULL, 338 STREAMID_TYPE_SMAC, 339 }; 340 341 enum streamid_vlan_tagged { 342 STREAMID_VLAN_RESERVED = 0, 343 STREAMID_VLAN_TAGGED, 344 STREAMID_VLAN_UNTAGGED, 345 STREAMID_VLAN_ALL, 346 }; 347 348 #define ENETC_PSFP_WILDCARD -1 349 #define HANDLE_OFFSET 100 350 351 enum forward_type { 352 FILTER_ACTION_TYPE_PSFP = BIT(0), 353 FILTER_ACTION_TYPE_ACL = BIT(1), 354 FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0), 355 }; 356 357 /* This is for limit output type for input actions */ 358 struct actions_fwd { 359 u64 actions; 360 u64 keys; /* include the must needed keys */ 361 enum forward_type output; 362 }; 363 364 struct psfp_streamfilter_counters { 365 u64 matching_frames_count; 366 u64 passing_frames_count; 367 u64 not_passing_frames_count; 368 u64 passing_sdu_count; 369 u64 not_passing_sdu_count; 370 u64 red_frames_count; 371 }; 372 373 struct enetc_streamid { 374 u32 index; 375 union { 376 u8 src_mac[6]; 377 u8 dst_mac[6]; 378 }; 379 u8 filtertype; 380 u16 vid; 381 u8 tagged; 382 s32 handle; 383 }; 384 385 struct enetc_psfp_filter { 386 u32 index; 387 s32 handle; 388 s8 prio; 389 u32 maxsdu; 390 u32 gate_id; 391 s32 meter_id; 392 refcount_t refcount; 393 struct hlist_node node; 394 }; 395 396 struct enetc_psfp_gate { 397 u32 index; 398 s8 init_ipv; 399 u64 basetime; 400 u64 cycletime; 401 u64 cycletimext; 402 u32 num_entries; 403 refcount_t refcount; 404 struct hlist_node node; 405 struct action_gate_entry entries[0]; 406 }; 407 408 /* Only enable the green color frame now 409 * Will add eir and ebs color blind, couple flag etc when 410 * policing action add more offloading parameters 411 */ 412 struct enetc_psfp_meter { 413 u32 index; 414 u32 cir; 415 u32 cbs; 416 refcount_t refcount; 417 struct hlist_node node; 418 }; 419 420 #define ENETC_PSFP_FLAGS_FMI BIT(0) 421 422 struct enetc_stream_filter { 423 struct enetc_streamid sid; 424 u32 sfi_index; 425 u32 sgi_index; 426 u32 flags; 427 u32 fmi_index; 428 struct flow_stats stats; 429 struct hlist_node node; 430 }; 431 432 struct enetc_psfp { 433 unsigned long dev_bitmap; 434 unsigned long *psfp_sfi_bitmap; 435 struct hlist_head stream_list; 436 struct hlist_head psfp_filter_list; 437 struct hlist_head psfp_gate_list; 438 struct hlist_head psfp_meter_list; 439 spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */ 440 }; 441 442 static struct actions_fwd enetc_act_fwd[] = { 443 { 444 BIT(FLOW_ACTION_GATE), 445 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS), 446 FILTER_ACTION_TYPE_PSFP 447 }, 448 { 449 BIT(FLOW_ACTION_POLICE) | 450 BIT(FLOW_ACTION_GATE), 451 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS), 452 FILTER_ACTION_TYPE_PSFP 453 }, 454 /* example for ACL actions */ 455 { 456 BIT(FLOW_ACTION_DROP), 457 0, 458 FILTER_ACTION_TYPE_ACL 459 } 460 }; 461 462 static struct enetc_psfp epsfp = { 463 .psfp_sfi_bitmap = NULL, 464 }; 465 466 static LIST_HEAD(enetc_block_cb_list); 467 468 static inline int enetc_get_port(struct enetc_ndev_priv *priv) 469 { 470 return priv->si->pdev->devfn & 0x7; 471 } 472 473 /* Stream Identity Entry Set Descriptor */ 474 static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, 475 struct enetc_streamid *sid, 476 u8 enable) 477 { 478 struct enetc_cbd cbd = {.cmd = 0}; 479 struct streamid_data *si_data; 480 struct streamid_conf *si_conf; 481 u16 data_size; 482 dma_addr_t dma; 483 int err; 484 485 if (sid->index >= priv->psfp_cap.max_streamid) 486 return -EINVAL; 487 488 if (sid->filtertype != STREAMID_TYPE_NULL && 489 sid->filtertype != STREAMID_TYPE_SMAC) 490 return -EOPNOTSUPP; 491 492 /* Disable operation before enable */ 493 cbd.index = cpu_to_le16((u16)sid->index); 494 cbd.cls = BDCR_CMD_STREAM_IDENTIFY; 495 cbd.status_flags = 0; 496 497 data_size = sizeof(struct streamid_data); 498 si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); 499 cbd.length = cpu_to_le16(data_size); 500 501 dma = dma_map_single(&priv->si->pdev->dev, si_data, 502 data_size, DMA_FROM_DEVICE); 503 if (dma_mapping_error(&priv->si->pdev->dev, dma)) { 504 netdev_err(priv->si->ndev, "DMA mapping failed!\n"); 505 kfree(si_data); 506 return -ENOMEM; 507 } 508 509 cbd.addr[0] = lower_32_bits(dma); 510 cbd.addr[1] = upper_32_bits(dma); 511 eth_broadcast_addr(si_data->dmac); 512 si_data->vid_vidm_tg = 513 cpu_to_le16(ENETC_CBDR_SID_VID_MASK 514 + ((0x3 << 14) | ENETC_CBDR_SID_VIDM)); 515 516 si_conf = &cbd.sid_set; 517 /* Only one port supported for one entry, set itself */ 518 si_conf->iports = 1 << enetc_get_port(priv); 519 si_conf->id_type = 1; 520 si_conf->oui[2] = 0x0; 521 si_conf->oui[1] = 0x80; 522 si_conf->oui[0] = 0xC2; 523 524 err = enetc_send_cmd(priv->si, &cbd); 525 if (err) 526 return -EINVAL; 527 528 if (!enable) { 529 kfree(si_data); 530 return 0; 531 } 532 533 /* Enable the entry overwrite again incase space flushed by hardware */ 534 memset(&cbd, 0, sizeof(cbd)); 535 536 cbd.index = cpu_to_le16((u16)sid->index); 537 cbd.cmd = 0; 538 cbd.cls = BDCR_CMD_STREAM_IDENTIFY; 539 cbd.status_flags = 0; 540 541 si_conf->en = 0x80; 542 si_conf->stream_handle = cpu_to_le32(sid->handle); 543 si_conf->iports = 1 << enetc_get_port(priv); 544 si_conf->id_type = sid->filtertype; 545 si_conf->oui[2] = 0x0; 546 si_conf->oui[1] = 0x80; 547 si_conf->oui[0] = 0xC2; 548 549 memset(si_data, 0, data_size); 550 551 cbd.length = cpu_to_le16(data_size); 552 553 cbd.addr[0] = lower_32_bits(dma); 554 cbd.addr[1] = upper_32_bits(dma); 555 556 /* VIDM default to be 1. 557 * VID Match. If set (b1) then the VID must match, otherwise 558 * any VID is considered a match. VIDM setting is only used 559 * when TG is set to b01. 560 */ 561 if (si_conf->id_type == STREAMID_TYPE_NULL) { 562 ether_addr_copy(si_data->dmac, sid->dst_mac); 563 si_data->vid_vidm_tg = 564 cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) + 565 ((((u16)(sid->tagged) & 0x3) << 14) 566 | ENETC_CBDR_SID_VIDM)); 567 } else if (si_conf->id_type == STREAMID_TYPE_SMAC) { 568 ether_addr_copy(si_data->smac, sid->src_mac); 569 si_data->vid_vidm_tg = 570 cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) + 571 ((((u16)(sid->tagged) & 0x3) << 14) 572 | ENETC_CBDR_SID_VIDM)); 573 } 574 575 err = enetc_send_cmd(priv->si, &cbd); 576 kfree(si_data); 577 578 return err; 579 } 580 581 /* Stream Filter Instance Set Descriptor */ 582 static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv, 583 struct enetc_psfp_filter *sfi, 584 u8 enable) 585 { 586 struct enetc_cbd cbd = {.cmd = 0}; 587 struct sfi_conf *sfi_config; 588 589 cbd.index = cpu_to_le16(sfi->index); 590 cbd.cls = BDCR_CMD_STREAM_FILTER; 591 cbd.status_flags = 0x80; 592 cbd.length = cpu_to_le16(1); 593 594 sfi_config = &cbd.sfi_conf; 595 if (!enable) 596 goto exit; 597 598 sfi_config->en = 0x80; 599 600 if (sfi->handle >= 0) { 601 sfi_config->stream_handle = 602 cpu_to_le32(sfi->handle); 603 sfi_config->sthm |= 0x80; 604 } 605 606 sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id); 607 sfi_config->input_ports = 1 << enetc_get_port(priv); 608 609 /* The priority value which may be matched against the 610 * frame’s priority value to determine a match for this entry. 611 */ 612 if (sfi->prio >= 0) 613 sfi_config->multi |= (sfi->prio & 0x7) | 0x8; 614 615 /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX 616 * field as being either an MSDU value or an index into the Flow 617 * Meter Instance table. 618 */ 619 if (sfi->maxsdu) { 620 sfi_config->msdu = 621 cpu_to_le16(sfi->maxsdu); 622 sfi_config->multi |= 0x40; 623 } 624 625 if (sfi->meter_id >= 0) { 626 sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id); 627 sfi_config->multi |= 0x80; 628 } 629 630 exit: 631 return enetc_send_cmd(priv->si, &cbd); 632 } 633 634 static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv, 635 u32 index, 636 struct psfp_streamfilter_counters *cnt) 637 { 638 struct enetc_cbd cbd = { .cmd = 2 }; 639 struct sfi_counter_data *data_buf; 640 dma_addr_t dma; 641 u16 data_size; 642 int err; 643 644 cbd.index = cpu_to_le16((u16)index); 645 cbd.cmd = 2; 646 cbd.cls = BDCR_CMD_STREAM_FILTER; 647 cbd.status_flags = 0; 648 649 data_size = sizeof(struct sfi_counter_data); 650 data_buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); 651 if (!data_buf) 652 return -ENOMEM; 653 654 dma = dma_map_single(&priv->si->pdev->dev, data_buf, 655 data_size, DMA_FROM_DEVICE); 656 if (dma_mapping_error(&priv->si->pdev->dev, dma)) { 657 netdev_err(priv->si->ndev, "DMA mapping failed!\n"); 658 err = -ENOMEM; 659 goto exit; 660 } 661 cbd.addr[0] = lower_32_bits(dma); 662 cbd.addr[1] = upper_32_bits(dma); 663 664 cbd.length = cpu_to_le16(data_size); 665 666 err = enetc_send_cmd(priv->si, &cbd); 667 if (err) 668 goto exit; 669 670 cnt->matching_frames_count = 671 ((u64)le32_to_cpu(data_buf->matchh) << 32) 672 + data_buf->matchl; 673 674 cnt->not_passing_sdu_count = 675 ((u64)le32_to_cpu(data_buf->msdu_droph) << 32) 676 + data_buf->msdu_dropl; 677 678 cnt->passing_sdu_count = cnt->matching_frames_count 679 - cnt->not_passing_sdu_count; 680 681 cnt->not_passing_frames_count = 682 ((u64)le32_to_cpu(data_buf->stream_gate_droph) << 32) 683 + le32_to_cpu(data_buf->stream_gate_dropl); 684 685 cnt->passing_frames_count = cnt->matching_frames_count 686 - cnt->not_passing_sdu_count 687 - cnt->not_passing_frames_count; 688 689 cnt->red_frames_count = 690 ((u64)le32_to_cpu(data_buf->flow_meter_droph) << 32) 691 + le32_to_cpu(data_buf->flow_meter_dropl); 692 693 exit: 694 kfree(data_buf); 695 return err; 696 } 697 698 static u64 get_ptp_now(struct enetc_hw *hw) 699 { 700 u64 now_lo, now_hi, now; 701 702 now_lo = enetc_rd(hw, ENETC_SICTR0); 703 now_hi = enetc_rd(hw, ENETC_SICTR1); 704 now = now_lo | now_hi << 32; 705 706 return now; 707 } 708 709 static int get_start_ns(u64 now, u64 cycle, u64 *start) 710 { 711 u64 n; 712 713 if (!cycle) 714 return -EFAULT; 715 716 n = div64_u64(now, cycle); 717 718 *start = (n + 1) * cycle; 719 720 return 0; 721 } 722 723 /* Stream Gate Instance Set Descriptor */ 724 static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv, 725 struct enetc_psfp_gate *sgi, 726 u8 enable) 727 { 728 struct enetc_cbd cbd = { .cmd = 0 }; 729 struct sgi_table *sgi_config; 730 struct sgcl_conf *sgcl_config; 731 struct sgcl_data *sgcl_data; 732 struct sgce *sgce; 733 dma_addr_t dma; 734 u16 data_size; 735 int err, i; 736 u64 now; 737 738 cbd.index = cpu_to_le16(sgi->index); 739 cbd.cmd = 0; 740 cbd.cls = BDCR_CMD_STREAM_GCL; 741 cbd.status_flags = 0x80; 742 743 /* disable */ 744 if (!enable) 745 return enetc_send_cmd(priv->si, &cbd); 746 747 if (!sgi->num_entries) 748 return 0; 749 750 if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist || 751 !sgi->cycletime) 752 return -EINVAL; 753 754 /* enable */ 755 sgi_config = &cbd.sgi_table; 756 757 /* Keep open before gate list start */ 758 sgi_config->ocgtst = 0x80; 759 760 sgi_config->oipv = (sgi->init_ipv < 0) ? 761 0x0 : ((sgi->init_ipv & 0x7) | 0x8); 762 763 sgi_config->en = 0x80; 764 765 /* Basic config */ 766 err = enetc_send_cmd(priv->si, &cbd); 767 if (err) 768 return -EINVAL; 769 770 memset(&cbd, 0, sizeof(cbd)); 771 772 cbd.index = cpu_to_le16(sgi->index); 773 cbd.cmd = 1; 774 cbd.cls = BDCR_CMD_STREAM_GCL; 775 cbd.status_flags = 0; 776 777 sgcl_config = &cbd.sgcl_conf; 778 779 sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3; 780 781 data_size = struct_size(sgcl_data, sgcl, sgi->num_entries); 782 783 sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); 784 if (!sgcl_data) 785 return -ENOMEM; 786 787 cbd.length = cpu_to_le16(data_size); 788 789 dma = dma_map_single(&priv->si->pdev->dev, 790 sgcl_data, data_size, 791 DMA_FROM_DEVICE); 792 if (dma_mapping_error(&priv->si->pdev->dev, dma)) { 793 netdev_err(priv->si->ndev, "DMA mapping failed!\n"); 794 kfree(sgcl_data); 795 return -ENOMEM; 796 } 797 798 cbd.addr[0] = lower_32_bits(dma); 799 cbd.addr[1] = upper_32_bits(dma); 800 801 sgce = &sgcl_data->sgcl[0]; 802 803 sgcl_config->agtst = 0x80; 804 805 sgcl_data->ct = cpu_to_le32(sgi->cycletime); 806 sgcl_data->cte = cpu_to_le32(sgi->cycletimext); 807 808 if (sgi->init_ipv >= 0) 809 sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8; 810 811 for (i = 0; i < sgi->num_entries; i++) { 812 struct action_gate_entry *from = &sgi->entries[i]; 813 struct sgce *to = &sgce[i]; 814 815 if (from->gate_state) 816 to->multi |= 0x10; 817 818 if (from->ipv >= 0) 819 to->multi |= ((from->ipv & 0x7) << 5) | 0x08; 820 821 if (from->maxoctets >= 0) { 822 to->multi |= 0x01; 823 to->msdu[0] = from->maxoctets & 0xFF; 824 to->msdu[1] = (from->maxoctets >> 8) & 0xFF; 825 to->msdu[2] = (from->maxoctets >> 16) & 0xFF; 826 } 827 828 to->interval = cpu_to_le32(from->interval); 829 } 830 831 /* If basetime is less than now, calculate start time */ 832 now = get_ptp_now(&priv->si->hw); 833 834 if (sgi->basetime < now) { 835 u64 start; 836 837 err = get_start_ns(now, sgi->cycletime, &start); 838 if (err) 839 goto exit; 840 sgcl_data->btl = cpu_to_le32(lower_32_bits(start)); 841 sgcl_data->bth = cpu_to_le32(upper_32_bits(start)); 842 } else { 843 u32 hi, lo; 844 845 hi = upper_32_bits(sgi->basetime); 846 lo = lower_32_bits(sgi->basetime); 847 sgcl_data->bth = cpu_to_le32(hi); 848 sgcl_data->btl = cpu_to_le32(lo); 849 } 850 851 err = enetc_send_cmd(priv->si, &cbd); 852 853 exit: 854 kfree(sgcl_data); 855 856 return err; 857 } 858 859 static int enetc_flowmeter_hw_set(struct enetc_ndev_priv *priv, 860 struct enetc_psfp_meter *fmi, 861 u8 enable) 862 { 863 struct enetc_cbd cbd = { .cmd = 0 }; 864 struct fmi_conf *fmi_config; 865 u64 temp = 0; 866 867 cbd.index = cpu_to_le16((u16)fmi->index); 868 cbd.cls = BDCR_CMD_FLOW_METER; 869 cbd.status_flags = 0x80; 870 871 if (!enable) 872 return enetc_send_cmd(priv->si, &cbd); 873 874 fmi_config = &cbd.fmi_conf; 875 fmi_config->en = 0x80; 876 877 if (fmi->cir) { 878 temp = (u64)8000 * fmi->cir; 879 temp = div_u64(temp, 3725); 880 } 881 882 fmi_config->cir = cpu_to_le32((u32)temp); 883 fmi_config->cbs = cpu_to_le32(fmi->cbs); 884 885 /* Default for eir ebs disable */ 886 fmi_config->eir = 0; 887 fmi_config->ebs = 0; 888 889 /* Default: 890 * mark red disable 891 * drop on yellow disable 892 * color mode disable 893 * couple flag disable 894 */ 895 fmi_config->conf = 0; 896 897 return enetc_send_cmd(priv->si, &cbd); 898 } 899 900 static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index) 901 { 902 struct enetc_stream_filter *f; 903 904 hlist_for_each_entry(f, &epsfp.stream_list, node) 905 if (f->sid.index == index) 906 return f; 907 908 return NULL; 909 } 910 911 static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index) 912 { 913 struct enetc_psfp_gate *g; 914 915 hlist_for_each_entry(g, &epsfp.psfp_gate_list, node) 916 if (g->index == index) 917 return g; 918 919 return NULL; 920 } 921 922 static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index) 923 { 924 struct enetc_psfp_filter *s; 925 926 hlist_for_each_entry(s, &epsfp.psfp_filter_list, node) 927 if (s->index == index) 928 return s; 929 930 return NULL; 931 } 932 933 static struct enetc_psfp_meter *enetc_get_meter_by_index(u32 index) 934 { 935 struct enetc_psfp_meter *m; 936 937 hlist_for_each_entry(m, &epsfp.psfp_meter_list, node) 938 if (m->index == index) 939 return m; 940 941 return NULL; 942 } 943 944 static struct enetc_psfp_filter 945 *enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi) 946 { 947 struct enetc_psfp_filter *s; 948 949 hlist_for_each_entry(s, &epsfp.psfp_filter_list, node) 950 if (s->gate_id == sfi->gate_id && 951 s->prio == sfi->prio && 952 s->maxsdu == sfi->maxsdu && 953 s->meter_id == sfi->meter_id) 954 return s; 955 956 return NULL; 957 } 958 959 static int enetc_get_free_index(struct enetc_ndev_priv *priv) 960 { 961 u32 max_size = priv->psfp_cap.max_psfp_filter; 962 unsigned long index; 963 964 index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size); 965 if (index == max_size) 966 return -1; 967 968 return index; 969 } 970 971 static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index) 972 { 973 struct enetc_psfp_filter *sfi; 974 u8 z; 975 976 sfi = enetc_get_filter_by_index(index); 977 WARN_ON(!sfi); 978 z = refcount_dec_and_test(&sfi->refcount); 979 980 if (z) { 981 enetc_streamfilter_hw_set(priv, sfi, false); 982 hlist_del(&sfi->node); 983 kfree(sfi); 984 clear_bit(index, epsfp.psfp_sfi_bitmap); 985 } 986 } 987 988 static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index) 989 { 990 struct enetc_psfp_gate *sgi; 991 u8 z; 992 993 sgi = enetc_get_gate_by_index(index); 994 WARN_ON(!sgi); 995 z = refcount_dec_and_test(&sgi->refcount); 996 if (z) { 997 enetc_streamgate_hw_set(priv, sgi, false); 998 hlist_del(&sgi->node); 999 kfree(sgi); 1000 } 1001 } 1002 1003 static void flow_meter_unref(struct enetc_ndev_priv *priv, u32 index) 1004 { 1005 struct enetc_psfp_meter *fmi; 1006 u8 z; 1007 1008 fmi = enetc_get_meter_by_index(index); 1009 WARN_ON(!fmi); 1010 z = refcount_dec_and_test(&fmi->refcount); 1011 if (z) { 1012 enetc_flowmeter_hw_set(priv, fmi, false); 1013 hlist_del(&fmi->node); 1014 kfree(fmi); 1015 } 1016 } 1017 1018 static void remove_one_chain(struct enetc_ndev_priv *priv, 1019 struct enetc_stream_filter *filter) 1020 { 1021 if (filter->flags & ENETC_PSFP_FLAGS_FMI) 1022 flow_meter_unref(priv, filter->fmi_index); 1023 1024 stream_gate_unref(priv, filter->sgi_index); 1025 stream_filter_unref(priv, filter->sfi_index); 1026 1027 hlist_del(&filter->node); 1028 kfree(filter); 1029 } 1030 1031 static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv, 1032 struct enetc_streamid *sid, 1033 struct enetc_psfp_filter *sfi, 1034 struct enetc_psfp_gate *sgi, 1035 struct enetc_psfp_meter *fmi) 1036 { 1037 int err; 1038 1039 err = enetc_streamid_hw_set(priv, sid, true); 1040 if (err) 1041 return err; 1042 1043 if (sfi) { 1044 err = enetc_streamfilter_hw_set(priv, sfi, true); 1045 if (err) 1046 goto revert_sid; 1047 } 1048 1049 err = enetc_streamgate_hw_set(priv, sgi, true); 1050 if (err) 1051 goto revert_sfi; 1052 1053 if (fmi) { 1054 err = enetc_flowmeter_hw_set(priv, fmi, true); 1055 if (err) 1056 goto revert_sgi; 1057 } 1058 1059 return 0; 1060 1061 revert_sgi: 1062 enetc_streamgate_hw_set(priv, sgi, false); 1063 revert_sfi: 1064 if (sfi) 1065 enetc_streamfilter_hw_set(priv, sfi, false); 1066 revert_sid: 1067 enetc_streamid_hw_set(priv, sid, false); 1068 return err; 1069 } 1070 1071 static struct actions_fwd *enetc_check_flow_actions(u64 acts, 1072 unsigned int inputkeys) 1073 { 1074 int i; 1075 1076 for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++) 1077 if (acts == enetc_act_fwd[i].actions && 1078 inputkeys & enetc_act_fwd[i].keys) 1079 return &enetc_act_fwd[i]; 1080 1081 return NULL; 1082 } 1083 1084 static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, 1085 struct flow_cls_offload *f) 1086 { 1087 struct flow_action_entry *entryg = NULL, *entryp = NULL; 1088 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 1089 struct netlink_ext_ack *extack = f->common.extack; 1090 struct enetc_stream_filter *filter, *old_filter; 1091 struct enetc_psfp_meter *fmi = NULL, *old_fmi; 1092 struct enetc_psfp_filter *sfi, *old_sfi; 1093 struct enetc_psfp_gate *sgi, *old_sgi; 1094 struct flow_action_entry *entry; 1095 struct action_gate_entry *e; 1096 u8 sfi_overwrite = 0; 1097 int entries_size; 1098 int i, err; 1099 1100 if (f->common.chain_index >= priv->psfp_cap.max_streamid) { 1101 NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!"); 1102 return -ENOSPC; 1103 } 1104 1105 flow_action_for_each(i, entry, &rule->action) 1106 if (entry->id == FLOW_ACTION_GATE) 1107 entryg = entry; 1108 else if (entry->id == FLOW_ACTION_POLICE) 1109 entryp = entry; 1110 1111 /* Not support without gate action */ 1112 if (!entryg) 1113 return -EINVAL; 1114 1115 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 1116 if (!filter) 1117 return -ENOMEM; 1118 1119 filter->sid.index = f->common.chain_index; 1120 1121 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 1122 struct flow_match_eth_addrs match; 1123 1124 flow_rule_match_eth_addrs(rule, &match); 1125 1126 if (!is_zero_ether_addr(match.mask->dst) && 1127 !is_zero_ether_addr(match.mask->src)) { 1128 NL_SET_ERR_MSG_MOD(extack, 1129 "Cannot match on both source and destination MAC"); 1130 err = -EINVAL; 1131 goto free_filter; 1132 } 1133 1134 if (!is_zero_ether_addr(match.mask->dst)) { 1135 if (!is_broadcast_ether_addr(match.mask->dst)) { 1136 NL_SET_ERR_MSG_MOD(extack, 1137 "Masked matching on destination MAC not supported"); 1138 err = -EINVAL; 1139 goto free_filter; 1140 } 1141 ether_addr_copy(filter->sid.dst_mac, match.key->dst); 1142 filter->sid.filtertype = STREAMID_TYPE_NULL; 1143 } 1144 1145 if (!is_zero_ether_addr(match.mask->src)) { 1146 if (!is_broadcast_ether_addr(match.mask->src)) { 1147 NL_SET_ERR_MSG_MOD(extack, 1148 "Masked matching on source MAC not supported"); 1149 err = -EINVAL; 1150 goto free_filter; 1151 } 1152 ether_addr_copy(filter->sid.src_mac, match.key->src); 1153 filter->sid.filtertype = STREAMID_TYPE_SMAC; 1154 } 1155 } else { 1156 NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS"); 1157 err = -EINVAL; 1158 goto free_filter; 1159 } 1160 1161 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 1162 struct flow_match_vlan match; 1163 1164 flow_rule_match_vlan(rule, &match); 1165 if (match.mask->vlan_priority) { 1166 if (match.mask->vlan_priority != 1167 (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) { 1168 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); 1169 err = -EINVAL; 1170 goto free_filter; 1171 } 1172 } 1173 1174 if (match.mask->vlan_id) { 1175 if (match.mask->vlan_id != VLAN_VID_MASK) { 1176 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id"); 1177 err = -EINVAL; 1178 goto free_filter; 1179 } 1180 1181 filter->sid.vid = match.key->vlan_id; 1182 if (!filter->sid.vid) 1183 filter->sid.tagged = STREAMID_VLAN_UNTAGGED; 1184 else 1185 filter->sid.tagged = STREAMID_VLAN_TAGGED; 1186 } 1187 } else { 1188 filter->sid.tagged = STREAMID_VLAN_ALL; 1189 } 1190 1191 /* parsing gate action */ 1192 if (entryg->gate.index >= priv->psfp_cap.max_psfp_gate) { 1193 NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!"); 1194 err = -ENOSPC; 1195 goto free_filter; 1196 } 1197 1198 if (entryg->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) { 1199 NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!"); 1200 err = -ENOSPC; 1201 goto free_filter; 1202 } 1203 1204 entries_size = struct_size(sgi, entries, entryg->gate.num_entries); 1205 sgi = kzalloc(entries_size, GFP_KERNEL); 1206 if (!sgi) { 1207 err = -ENOMEM; 1208 goto free_filter; 1209 } 1210 1211 refcount_set(&sgi->refcount, 1); 1212 sgi->index = entryg->gate.index; 1213 sgi->init_ipv = entryg->gate.prio; 1214 sgi->basetime = entryg->gate.basetime; 1215 sgi->cycletime = entryg->gate.cycletime; 1216 sgi->num_entries = entryg->gate.num_entries; 1217 1218 e = sgi->entries; 1219 for (i = 0; i < entryg->gate.num_entries; i++) { 1220 e[i].gate_state = entryg->gate.entries[i].gate_state; 1221 e[i].interval = entryg->gate.entries[i].interval; 1222 e[i].ipv = entryg->gate.entries[i].ipv; 1223 e[i].maxoctets = entryg->gate.entries[i].maxoctets; 1224 } 1225 1226 filter->sgi_index = sgi->index; 1227 1228 sfi = kzalloc(sizeof(*sfi), GFP_KERNEL); 1229 if (!sfi) { 1230 err = -ENOMEM; 1231 goto free_gate; 1232 } 1233 1234 refcount_set(&sfi->refcount, 1); 1235 sfi->gate_id = sgi->index; 1236 sfi->meter_id = ENETC_PSFP_WILDCARD; 1237 1238 /* Flow meter and max frame size */ 1239 if (entryp) { 1240 if (entryp->police.burst) { 1241 fmi = kzalloc(sizeof(*fmi), GFP_KERNEL); 1242 if (!fmi) { 1243 err = -ENOMEM; 1244 goto free_sfi; 1245 } 1246 refcount_set(&fmi->refcount, 1); 1247 fmi->cir = entryp->police.rate_bytes_ps; 1248 fmi->cbs = entryp->police.burst; 1249 fmi->index = entryp->police.index; 1250 filter->flags |= ENETC_PSFP_FLAGS_FMI; 1251 filter->fmi_index = fmi->index; 1252 sfi->meter_id = fmi->index; 1253 } 1254 1255 if (entryp->police.mtu) 1256 sfi->maxsdu = entryp->police.mtu; 1257 } 1258 1259 /* prio ref the filter prio */ 1260 if (f->common.prio && f->common.prio <= BIT(3)) 1261 sfi->prio = f->common.prio - 1; 1262 else 1263 sfi->prio = ENETC_PSFP_WILDCARD; 1264 1265 old_sfi = enetc_psfp_check_sfi(sfi); 1266 if (!old_sfi) { 1267 int index; 1268 1269 index = enetc_get_free_index(priv); 1270 if (sfi->handle < 0) { 1271 NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!"); 1272 err = -ENOSPC; 1273 goto free_fmi; 1274 } 1275 1276 sfi->index = index; 1277 sfi->handle = index + HANDLE_OFFSET; 1278 /* Update the stream filter handle also */ 1279 filter->sid.handle = sfi->handle; 1280 filter->sfi_index = sfi->index; 1281 sfi_overwrite = 0; 1282 } else { 1283 filter->sfi_index = old_sfi->index; 1284 filter->sid.handle = old_sfi->handle; 1285 sfi_overwrite = 1; 1286 } 1287 1288 err = enetc_psfp_hw_set(priv, &filter->sid, 1289 sfi_overwrite ? NULL : sfi, sgi, fmi); 1290 if (err) 1291 goto free_fmi; 1292 1293 spin_lock(&epsfp.psfp_lock); 1294 if (filter->flags & ENETC_PSFP_FLAGS_FMI) { 1295 old_fmi = enetc_get_meter_by_index(filter->fmi_index); 1296 if (old_fmi) { 1297 fmi->refcount = old_fmi->refcount; 1298 refcount_set(&fmi->refcount, 1299 refcount_read(&old_fmi->refcount) + 1); 1300 hlist_del(&old_fmi->node); 1301 kfree(old_fmi); 1302 } 1303 hlist_add_head(&fmi->node, &epsfp.psfp_meter_list); 1304 } 1305 1306 /* Remove the old node if exist and update with a new node */ 1307 old_sgi = enetc_get_gate_by_index(filter->sgi_index); 1308 if (old_sgi) { 1309 refcount_set(&sgi->refcount, 1310 refcount_read(&old_sgi->refcount) + 1); 1311 hlist_del(&old_sgi->node); 1312 kfree(old_sgi); 1313 } 1314 1315 hlist_add_head(&sgi->node, &epsfp.psfp_gate_list); 1316 1317 if (!old_sfi) { 1318 hlist_add_head(&sfi->node, &epsfp.psfp_filter_list); 1319 set_bit(sfi->index, epsfp.psfp_sfi_bitmap); 1320 } else { 1321 kfree(sfi); 1322 refcount_inc(&old_sfi->refcount); 1323 } 1324 1325 old_filter = enetc_get_stream_by_index(filter->sid.index); 1326 if (old_filter) 1327 remove_one_chain(priv, old_filter); 1328 1329 filter->stats.lastused = jiffies; 1330 hlist_add_head(&filter->node, &epsfp.stream_list); 1331 1332 spin_unlock(&epsfp.psfp_lock); 1333 1334 return 0; 1335 1336 free_fmi: 1337 kfree(fmi); 1338 free_sfi: 1339 kfree(sfi); 1340 free_gate: 1341 kfree(sgi); 1342 free_filter: 1343 kfree(filter); 1344 1345 return err; 1346 } 1347 1348 static int enetc_config_clsflower(struct enetc_ndev_priv *priv, 1349 struct flow_cls_offload *cls_flower) 1350 { 1351 struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower); 1352 struct netlink_ext_ack *extack = cls_flower->common.extack; 1353 struct flow_dissector *dissector = rule->match.dissector; 1354 struct flow_action *action = &rule->action; 1355 struct flow_action_entry *entry; 1356 struct actions_fwd *fwd; 1357 u64 actions = 0; 1358 int i, err; 1359 1360 if (!flow_action_has_entries(action)) { 1361 NL_SET_ERR_MSG_MOD(extack, "At least one action is needed"); 1362 return -EINVAL; 1363 } 1364 1365 flow_action_for_each(i, entry, action) 1366 actions |= BIT(entry->id); 1367 1368 fwd = enetc_check_flow_actions(actions, dissector->used_keys); 1369 if (!fwd) { 1370 NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!"); 1371 return -EOPNOTSUPP; 1372 } 1373 1374 if (fwd->output & FILTER_ACTION_TYPE_PSFP) { 1375 err = enetc_psfp_parse_clsflower(priv, cls_flower); 1376 if (err) { 1377 NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs"); 1378 return err; 1379 } 1380 } else { 1381 NL_SET_ERR_MSG_MOD(extack, "Unsupported actions"); 1382 return -EOPNOTSUPP; 1383 } 1384 1385 return 0; 1386 } 1387 1388 static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv, 1389 struct flow_cls_offload *f) 1390 { 1391 struct enetc_stream_filter *filter; 1392 struct netlink_ext_ack *extack = f->common.extack; 1393 int err; 1394 1395 if (f->common.chain_index >= priv->psfp_cap.max_streamid) { 1396 NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!"); 1397 return -ENOSPC; 1398 } 1399 1400 filter = enetc_get_stream_by_index(f->common.chain_index); 1401 if (!filter) 1402 return -EINVAL; 1403 1404 err = enetc_streamid_hw_set(priv, &filter->sid, false); 1405 if (err) 1406 return err; 1407 1408 remove_one_chain(priv, filter); 1409 1410 return 0; 1411 } 1412 1413 static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv, 1414 struct flow_cls_offload *f) 1415 { 1416 return enetc_psfp_destroy_clsflower(priv, f); 1417 } 1418 1419 static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv, 1420 struct flow_cls_offload *f) 1421 { 1422 struct psfp_streamfilter_counters counters = {}; 1423 struct enetc_stream_filter *filter; 1424 struct flow_stats stats = {}; 1425 int err; 1426 1427 filter = enetc_get_stream_by_index(f->common.chain_index); 1428 if (!filter) 1429 return -EINVAL; 1430 1431 err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters); 1432 if (err) 1433 return -EINVAL; 1434 1435 spin_lock(&epsfp.psfp_lock); 1436 stats.pkts = counters.matching_frames_count + 1437 counters.not_passing_sdu_count - 1438 filter->stats.pkts; 1439 stats.drops = counters.not_passing_frames_count + 1440 counters.not_passing_sdu_count + 1441 counters.red_frames_count - 1442 filter->stats.drops; 1443 stats.lastused = filter->stats.lastused; 1444 filter->stats.pkts += stats.pkts; 1445 filter->stats.drops += stats.drops; 1446 spin_unlock(&epsfp.psfp_lock); 1447 1448 flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops, 1449 stats.lastused, FLOW_ACTION_HW_STATS_DELAYED); 1450 1451 return 0; 1452 } 1453 1454 static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv, 1455 struct flow_cls_offload *cls_flower) 1456 { 1457 switch (cls_flower->command) { 1458 case FLOW_CLS_REPLACE: 1459 return enetc_config_clsflower(priv, cls_flower); 1460 case FLOW_CLS_DESTROY: 1461 return enetc_destroy_clsflower(priv, cls_flower); 1462 case FLOW_CLS_STATS: 1463 return enetc_psfp_get_stats(priv, cls_flower); 1464 default: 1465 return -EOPNOTSUPP; 1466 } 1467 } 1468 1469 static inline void clean_psfp_sfi_bitmap(void) 1470 { 1471 bitmap_free(epsfp.psfp_sfi_bitmap); 1472 epsfp.psfp_sfi_bitmap = NULL; 1473 } 1474 1475 static void clean_stream_list(void) 1476 { 1477 struct enetc_stream_filter *s; 1478 struct hlist_node *tmp; 1479 1480 hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) { 1481 hlist_del(&s->node); 1482 kfree(s); 1483 } 1484 } 1485 1486 static void clean_sfi_list(void) 1487 { 1488 struct enetc_psfp_filter *sfi; 1489 struct hlist_node *tmp; 1490 1491 hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) { 1492 hlist_del(&sfi->node); 1493 kfree(sfi); 1494 } 1495 } 1496 1497 static void clean_sgi_list(void) 1498 { 1499 struct enetc_psfp_gate *sgi; 1500 struct hlist_node *tmp; 1501 1502 hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) { 1503 hlist_del(&sgi->node); 1504 kfree(sgi); 1505 } 1506 } 1507 1508 static void clean_psfp_all(void) 1509 { 1510 /* Disable all list nodes and free all memory */ 1511 clean_sfi_list(); 1512 clean_sgi_list(); 1513 clean_stream_list(); 1514 epsfp.dev_bitmap = 0; 1515 clean_psfp_sfi_bitmap(); 1516 } 1517 1518 int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 1519 void *cb_priv) 1520 { 1521 struct net_device *ndev = cb_priv; 1522 1523 if (!tc_can_offload(ndev)) 1524 return -EOPNOTSUPP; 1525 1526 switch (type) { 1527 case TC_SETUP_CLSFLOWER: 1528 return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data); 1529 default: 1530 return -EOPNOTSUPP; 1531 } 1532 } 1533 1534 int enetc_psfp_init(struct enetc_ndev_priv *priv) 1535 { 1536 if (epsfp.psfp_sfi_bitmap) 1537 return 0; 1538 1539 epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter, 1540 GFP_KERNEL); 1541 if (!epsfp.psfp_sfi_bitmap) 1542 return -ENOMEM; 1543 1544 spin_lock_init(&epsfp.psfp_lock); 1545 1546 if (list_empty(&enetc_block_cb_list)) 1547 epsfp.dev_bitmap = 0; 1548 1549 return 0; 1550 } 1551 1552 int enetc_psfp_clean(struct enetc_ndev_priv *priv) 1553 { 1554 if (!list_empty(&enetc_block_cb_list)) 1555 return -EBUSY; 1556 1557 clean_psfp_all(); 1558 1559 return 0; 1560 } 1561 1562 int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data) 1563 { 1564 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1565 struct flow_block_offload *f = type_data; 1566 int err; 1567 1568 err = flow_block_cb_setup_simple(f, &enetc_block_cb_list, 1569 enetc_setup_tc_block_cb, 1570 ndev, ndev, true); 1571 if (err) 1572 return err; 1573 1574 switch (f->command) { 1575 case FLOW_BLOCK_BIND: 1576 set_bit(enetc_get_port(priv), &epsfp.dev_bitmap); 1577 break; 1578 case FLOW_BLOCK_UNBIND: 1579 clear_bit(enetc_get_port(priv), &epsfp.dev_bitmap); 1580 if (!epsfp.dev_bitmap) 1581 clean_psfp_all(); 1582 break; 1583 } 1584 1585 return 0; 1586 } 1587