1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 2 /* Copyright 2019 NXP */ 3 4 #include "enetc.h" 5 6 #include <net/pkt_sched.h> 7 #include <linux/math64.h> 8 #include <linux/refcount.h> 9 #include <net/pkt_cls.h> 10 #include <net/tc_act/tc_gate.h> 11 12 static u16 enetc_get_max_gcl_len(struct enetc_hw *hw) 13 { 14 return enetc_rd(hw, ENETC_PTGCAPR) & ENETC_PTGCAPR_MAX_GCL_LEN_MASK; 15 } 16 17 void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed) 18 { 19 struct enetc_hw *hw = &priv->si->hw; 20 u32 old_speed = priv->speed; 21 u32 pspeed, tmp; 22 23 if (speed == old_speed) 24 return; 25 26 switch (speed) { 27 case SPEED_1000: 28 pspeed = ENETC_PMR_PSPEED_1000M; 29 break; 30 case SPEED_2500: 31 pspeed = ENETC_PMR_PSPEED_2500M; 32 break; 33 case SPEED_100: 34 pspeed = ENETC_PMR_PSPEED_100M; 35 break; 36 case SPEED_10: 37 default: 38 pspeed = ENETC_PMR_PSPEED_10M; 39 } 40 41 priv->speed = speed; 42 tmp = enetc_port_rd(hw, ENETC_PMR); 43 enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed); 44 } 45 46 static int enetc_setup_taprio(struct enetc_ndev_priv *priv, 47 struct tc_taprio_qopt_offload *admin_conf) 48 { 49 struct enetc_hw *hw = &priv->si->hw; 50 struct enetc_cbd cbd = {.cmd = 0}; 51 struct tgs_gcl_conf *gcl_config; 52 struct tgs_gcl_data *gcl_data; 53 dma_addr_t dma; 54 struct gce *gce; 55 u16 data_size; 56 u16 gcl_len; 57 void *tmp; 58 u32 tge; 59 int err; 60 int i; 61 62 /* TSD and Qbv are mutually exclusive in hardware */ 63 for (i = 0; i < priv->num_tx_rings; i++) 64 if (priv->tx_ring[i]->tsd_enable) 65 return -EBUSY; 66 67 if (admin_conf->num_entries > enetc_get_max_gcl_len(hw)) 68 return -EINVAL; 69 70 if (admin_conf->cycle_time > U32_MAX || 71 admin_conf->cycle_time_extension > U32_MAX) 72 return -EINVAL; 73 74 /* Configure the (administrative) gate control list using the 75 * control BD descriptor. 76 */ 77 gcl_config = &cbd.gcl_conf; 78 gcl_len = admin_conf->num_entries; 79 80 data_size = struct_size(gcl_data, entry, gcl_len); 81 tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size, 82 &dma, (void *)&gcl_data); 83 if (!tmp) 84 return -ENOMEM; 85 86 gce = (struct gce *)(gcl_data + 1); 87 88 /* Set all gates open as default */ 89 gcl_config->atc = 0xff; 90 gcl_config->acl_len = cpu_to_le16(gcl_len); 91 92 gcl_data->btl = cpu_to_le32(lower_32_bits(admin_conf->base_time)); 93 gcl_data->bth = cpu_to_le32(upper_32_bits(admin_conf->base_time)); 94 gcl_data->ct = cpu_to_le32(admin_conf->cycle_time); 95 gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension); 96 97 for (i = 0; i < gcl_len; i++) { 98 struct tc_taprio_sched_entry *temp_entry; 99 struct gce *temp_gce = gce + i; 100 101 temp_entry = &admin_conf->entries[i]; 102 103 temp_gce->gate = (u8)temp_entry->gate_mask; 104 temp_gce->period = cpu_to_le32(temp_entry->interval); 105 } 106 107 cbd.status_flags = 0; 108 109 cbd.cls = BDCR_CMD_PORT_GCL; 110 cbd.status_flags = 0; 111 112 tge = enetc_rd(hw, ENETC_PTGCR); 113 enetc_wr(hw, ENETC_PTGCR, tge | ENETC_PTGCR_TGE); 114 115 err = enetc_send_cmd(priv->si, &cbd); 116 if (err) 117 enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE); 118 119 enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma); 120 121 if (err) 122 return err; 123 124 enetc_set_ptcmsdur(hw, admin_conf->max_sdu); 125 priv->active_offloads |= ENETC_F_QBV; 126 127 return 0; 128 } 129 130 static void enetc_reset_taprio(struct enetc_ndev_priv *priv) 131 { 132 struct enetc_hw *hw = &priv->si->hw; 133 u32 val; 134 135 val = enetc_rd(hw, ENETC_PTGCR); 136 enetc_wr(hw, ENETC_PTGCR, val & ~ENETC_PTGCR_TGE); 137 enetc_reset_ptcmsdur(hw); 138 139 priv->active_offloads &= ~ENETC_F_QBV; 140 } 141 142 static void enetc_taprio_destroy(struct net_device *ndev) 143 { 144 struct enetc_ndev_priv *priv = netdev_priv(ndev); 145 146 enetc_reset_taprio(priv); 147 enetc_reset_tc_mqprio(ndev); 148 } 149 150 static void enetc_taprio_stats(struct net_device *ndev, 151 struct tc_taprio_qopt_stats *stats) 152 { 153 struct enetc_ndev_priv *priv = netdev_priv(ndev); 154 u64 window_drops = 0; 155 int i; 156 157 for (i = 0; i < priv->num_tx_rings; i++) 158 window_drops += priv->tx_ring[i]->stats.win_drop; 159 160 stats->window_drops = window_drops; 161 } 162 163 static void enetc_taprio_tc_stats(struct net_device *ndev, 164 struct tc_taprio_qopt_tc_stats *tc_stats) 165 { 166 struct tc_taprio_qopt_stats *stats = &tc_stats->stats; 167 struct enetc_ndev_priv *priv = netdev_priv(ndev); 168 int tc = tc_stats->tc; 169 u64 window_drops = 0; 170 int i; 171 172 for (i = 0; i < priv->num_tx_rings; i++) 173 if (priv->tx_ring[i]->prio == tc) 174 window_drops += priv->tx_ring[i]->stats.win_drop; 175 176 stats->window_drops = window_drops; 177 } 178 179 static int enetc_taprio_replace(struct net_device *ndev, 180 struct tc_taprio_qopt_offload *offload) 181 { 182 struct enetc_ndev_priv *priv = netdev_priv(ndev); 183 int err; 184 185 err = enetc_setup_tc_mqprio(ndev, &offload->mqprio); 186 if (err) 187 return err; 188 189 err = enetc_setup_taprio(priv, offload); 190 if (err) 191 enetc_reset_tc_mqprio(ndev); 192 193 return err; 194 } 195 196 int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data) 197 { 198 struct tc_taprio_qopt_offload *offload = type_data; 199 int err = 0; 200 201 switch (offload->cmd) { 202 case TAPRIO_CMD_REPLACE: 203 err = enetc_taprio_replace(ndev, offload); 204 break; 205 case TAPRIO_CMD_DESTROY: 206 enetc_taprio_destroy(ndev); 207 break; 208 case TAPRIO_CMD_STATS: 209 enetc_taprio_stats(ndev, &offload->stats); 210 break; 211 case TAPRIO_CMD_TC_STATS: 212 enetc_taprio_tc_stats(ndev, &offload->tc_stats); 213 break; 214 default: 215 err = -EOPNOTSUPP; 216 } 217 218 return err; 219 } 220 221 static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc) 222 { 223 return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE; 224 } 225 226 static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc) 227 { 228 return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK; 229 } 230 231 int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) 232 { 233 struct enetc_ndev_priv *priv = netdev_priv(ndev); 234 struct tc_cbs_qopt_offload *cbs = type_data; 235 u32 port_transmit_rate = priv->speed; 236 u8 tc_nums = netdev_get_num_tc(ndev); 237 struct enetc_hw *hw = &priv->si->hw; 238 u32 hi_credit_bit, hi_credit_reg; 239 u32 max_interference_size; 240 u32 port_frame_max_size; 241 u8 tc = cbs->queue; 242 u8 prio_top, prio_next; 243 int bw_sum = 0; 244 u8 bw; 245 246 prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1); 247 prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2); 248 249 /* Support highest prio and second prio tc in cbs mode */ 250 if (tc != prio_top && tc != prio_next) 251 return -EOPNOTSUPP; 252 253 if (!cbs->enable) { 254 /* Make sure the other TC that are numerically 255 * lower than this TC have been disabled. 256 */ 257 if (tc == prio_top && 258 enetc_get_cbs_enable(hw, prio_next)) { 259 dev_err(&ndev->dev, 260 "Disable TC%d before disable TC%d\n", 261 prio_next, tc); 262 return -EINVAL; 263 } 264 265 enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0); 266 enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0); 267 268 return 0; 269 } 270 271 if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L || 272 cbs->idleslope < 0 || cbs->sendslope > 0) 273 return -EOPNOTSUPP; 274 275 port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; 276 277 bw = cbs->idleslope / (port_transmit_rate * 10UL); 278 279 /* Make sure the other TC that are numerically 280 * higher than this TC have been enabled. 281 */ 282 if (tc == prio_next) { 283 if (!enetc_get_cbs_enable(hw, prio_top)) { 284 dev_err(&ndev->dev, 285 "Enable TC%d first before enable TC%d\n", 286 prio_top, prio_next); 287 return -EINVAL; 288 } 289 bw_sum += enetc_get_cbs_bw(hw, prio_top); 290 } 291 292 if (bw_sum + bw >= 100) { 293 dev_err(&ndev->dev, 294 "The sum of all CBS Bandwidth can't exceed 100\n"); 295 return -EINVAL; 296 } 297 298 enetc_port_rd(hw, ENETC_PTCMSDUR(tc)); 299 300 /* For top prio TC, the max_interfrence_size is maxSizedFrame. 301 * 302 * For next prio TC, the max_interfrence_size is calculated as below: 303 * 304 * max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra) 305 * 306 * - RA: idleSlope for AVB Class A 307 * - R0: port transmit rate 308 * - M0: maximum sized frame for the port 309 * - MA: maximum sized frame for AVB Class A 310 */ 311 312 if (tc == prio_top) { 313 max_interference_size = port_frame_max_size * 8; 314 } else { 315 u32 m0, ma, r0, ra; 316 317 m0 = port_frame_max_size * 8; 318 ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8; 319 ra = enetc_get_cbs_bw(hw, prio_top) * 320 port_transmit_rate * 10000ULL; 321 r0 = port_transmit_rate * 1000000ULL; 322 max_interference_size = m0 + ma + 323 (u32)div_u64((u64)ra * m0, r0 - ra); 324 } 325 326 /* hiCredit bits calculate by: 327 * 328 * maxSizedFrame * (idleSlope/portTxRate) 329 */ 330 hi_credit_bit = max_interference_size * bw / 100; 331 332 /* hiCredit bits to hiCredit register need to calculated as: 333 * 334 * (enetClockFrequency / portTransmitRate) * 100 335 */ 336 hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit, 337 port_transmit_rate * 1000000ULL); 338 339 enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg); 340 341 /* Set bw register and enable this traffic class */ 342 enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE); 343 344 return 0; 345 } 346 347 int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data) 348 { 349 struct enetc_ndev_priv *priv = netdev_priv(ndev); 350 struct tc_etf_qopt_offload *qopt = type_data; 351 u8 tc_nums = netdev_get_num_tc(ndev); 352 struct enetc_hw *hw = &priv->si->hw; 353 int tc; 354 355 if (!tc_nums) 356 return -EOPNOTSUPP; 357 358 tc = qopt->queue; 359 360 if (tc < 0 || tc >= priv->num_tx_rings) 361 return -EINVAL; 362 363 /* TSD and Qbv are mutually exclusive in hardware */ 364 if (enetc_rd(hw, ENETC_PTGCR) & ENETC_PTGCR_TGE) 365 return -EBUSY; 366 367 priv->tx_ring[tc]->tsd_enable = qopt->enable; 368 enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ? ENETC_TSDE : 0); 369 370 return 0; 371 } 372 373 enum streamid_type { 374 STREAMID_TYPE_RESERVED = 0, 375 STREAMID_TYPE_NULL, 376 STREAMID_TYPE_SMAC, 377 }; 378 379 enum streamid_vlan_tagged { 380 STREAMID_VLAN_RESERVED = 0, 381 STREAMID_VLAN_TAGGED, 382 STREAMID_VLAN_UNTAGGED, 383 STREAMID_VLAN_ALL, 384 }; 385 386 #define ENETC_PSFP_WILDCARD -1 387 #define HANDLE_OFFSET 100 388 389 enum forward_type { 390 FILTER_ACTION_TYPE_PSFP = BIT(0), 391 FILTER_ACTION_TYPE_ACL = BIT(1), 392 FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0), 393 }; 394 395 /* This is for limit output type for input actions */ 396 struct actions_fwd { 397 u64 actions; 398 u64 keys; /* include the must needed keys */ 399 enum forward_type output; 400 }; 401 402 struct psfp_streamfilter_counters { 403 u64 matching_frames_count; 404 u64 passing_frames_count; 405 u64 not_passing_frames_count; 406 u64 passing_sdu_count; 407 u64 not_passing_sdu_count; 408 u64 red_frames_count; 409 }; 410 411 struct enetc_streamid { 412 u32 index; 413 union { 414 u8 src_mac[6]; 415 u8 dst_mac[6]; 416 }; 417 u8 filtertype; 418 u16 vid; 419 u8 tagged; 420 s32 handle; 421 }; 422 423 struct enetc_psfp_filter { 424 u32 index; 425 s32 handle; 426 s8 prio; 427 u32 maxsdu; 428 u32 gate_id; 429 s32 meter_id; 430 refcount_t refcount; 431 struct hlist_node node; 432 }; 433 434 struct enetc_psfp_gate { 435 u32 index; 436 s8 init_ipv; 437 u64 basetime; 438 u64 cycletime; 439 u64 cycletimext; 440 u32 num_entries; 441 refcount_t refcount; 442 struct hlist_node node; 443 struct action_gate_entry entries[]; 444 }; 445 446 /* Only enable the green color frame now 447 * Will add eir and ebs color blind, couple flag etc when 448 * policing action add more offloading parameters 449 */ 450 struct enetc_psfp_meter { 451 u32 index; 452 u32 cir; 453 u32 cbs; 454 refcount_t refcount; 455 struct hlist_node node; 456 }; 457 458 #define ENETC_PSFP_FLAGS_FMI BIT(0) 459 460 struct enetc_stream_filter { 461 struct enetc_streamid sid; 462 u32 sfi_index; 463 u32 sgi_index; 464 u32 flags; 465 u32 fmi_index; 466 struct flow_stats stats; 467 struct hlist_node node; 468 }; 469 470 struct enetc_psfp { 471 unsigned long dev_bitmap; 472 unsigned long *psfp_sfi_bitmap; 473 struct hlist_head stream_list; 474 struct hlist_head psfp_filter_list; 475 struct hlist_head psfp_gate_list; 476 struct hlist_head psfp_meter_list; 477 spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */ 478 }; 479 480 static struct actions_fwd enetc_act_fwd[] = { 481 { 482 BIT(FLOW_ACTION_GATE), 483 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS), 484 FILTER_ACTION_TYPE_PSFP 485 }, 486 { 487 BIT(FLOW_ACTION_POLICE) | 488 BIT(FLOW_ACTION_GATE), 489 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS), 490 FILTER_ACTION_TYPE_PSFP 491 }, 492 /* example for ACL actions */ 493 { 494 BIT(FLOW_ACTION_DROP), 495 0, 496 FILTER_ACTION_TYPE_ACL 497 } 498 }; 499 500 static struct enetc_psfp epsfp = { 501 .dev_bitmap = 0, 502 .psfp_sfi_bitmap = NULL, 503 }; 504 505 static LIST_HEAD(enetc_block_cb_list); 506 507 /* Stream Identity Entry Set Descriptor */ 508 static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, 509 struct enetc_streamid *sid, 510 u8 enable) 511 { 512 struct enetc_cbd cbd = {.cmd = 0}; 513 struct streamid_data *si_data; 514 struct streamid_conf *si_conf; 515 dma_addr_t dma; 516 u16 data_size; 517 void *tmp; 518 int port; 519 int err; 520 521 port = enetc_pf_to_port(priv->si->pdev); 522 if (port < 0) 523 return -EINVAL; 524 525 if (sid->index >= priv->psfp_cap.max_streamid) 526 return -EINVAL; 527 528 if (sid->filtertype != STREAMID_TYPE_NULL && 529 sid->filtertype != STREAMID_TYPE_SMAC) 530 return -EOPNOTSUPP; 531 532 /* Disable operation before enable */ 533 cbd.index = cpu_to_le16((u16)sid->index); 534 cbd.cls = BDCR_CMD_STREAM_IDENTIFY; 535 cbd.status_flags = 0; 536 537 data_size = sizeof(struct streamid_data); 538 tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size, 539 &dma, (void *)&si_data); 540 if (!tmp) 541 return -ENOMEM; 542 543 eth_broadcast_addr(si_data->dmac); 544 si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK 545 + ((0x3 << 14) | ENETC_CBDR_SID_VIDM)); 546 547 si_conf = &cbd.sid_set; 548 /* Only one port supported for one entry, set itself */ 549 si_conf->iports = cpu_to_le32(1 << port); 550 si_conf->id_type = 1; 551 si_conf->oui[2] = 0x0; 552 si_conf->oui[1] = 0x80; 553 si_conf->oui[0] = 0xC2; 554 555 err = enetc_send_cmd(priv->si, &cbd); 556 if (err) 557 goto out; 558 559 if (!enable) 560 goto out; 561 562 /* Enable the entry overwrite again incase space flushed by hardware */ 563 cbd.status_flags = 0; 564 565 si_conf->en = 0x80; 566 si_conf->stream_handle = cpu_to_le32(sid->handle); 567 si_conf->iports = cpu_to_le32(1 << port); 568 si_conf->id_type = sid->filtertype; 569 si_conf->oui[2] = 0x0; 570 si_conf->oui[1] = 0x80; 571 si_conf->oui[0] = 0xC2; 572 573 memset(si_data, 0, data_size); 574 575 /* VIDM default to be 1. 576 * VID Match. If set (b1) then the VID must match, otherwise 577 * any VID is considered a match. VIDM setting is only used 578 * when TG is set to b01. 579 */ 580 if (si_conf->id_type == STREAMID_TYPE_NULL) { 581 ether_addr_copy(si_data->dmac, sid->dst_mac); 582 si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) + 583 ((((u16)(sid->tagged) & 0x3) << 14) 584 | ENETC_CBDR_SID_VIDM); 585 } else if (si_conf->id_type == STREAMID_TYPE_SMAC) { 586 ether_addr_copy(si_data->smac, sid->src_mac); 587 si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) + 588 ((((u16)(sid->tagged) & 0x3) << 14) 589 | ENETC_CBDR_SID_VIDM); 590 } 591 592 err = enetc_send_cmd(priv->si, &cbd); 593 out: 594 enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma); 595 596 return err; 597 } 598 599 /* Stream Filter Instance Set Descriptor */ 600 static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv, 601 struct enetc_psfp_filter *sfi, 602 u8 enable) 603 { 604 struct enetc_cbd cbd = {.cmd = 0}; 605 struct sfi_conf *sfi_config; 606 int port; 607 608 port = enetc_pf_to_port(priv->si->pdev); 609 if (port < 0) 610 return -EINVAL; 611 612 cbd.index = cpu_to_le16(sfi->index); 613 cbd.cls = BDCR_CMD_STREAM_FILTER; 614 cbd.status_flags = 0x80; 615 cbd.length = cpu_to_le16(1); 616 617 sfi_config = &cbd.sfi_conf; 618 if (!enable) 619 goto exit; 620 621 sfi_config->en = 0x80; 622 623 if (sfi->handle >= 0) { 624 sfi_config->stream_handle = 625 cpu_to_le32(sfi->handle); 626 sfi_config->sthm |= 0x80; 627 } 628 629 sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id); 630 sfi_config->input_ports = cpu_to_le32(1 << port); 631 632 /* The priority value which may be matched against the 633 * frame’s priority value to determine a match for this entry. 634 */ 635 if (sfi->prio >= 0) 636 sfi_config->multi |= (sfi->prio & 0x7) | 0x8; 637 638 /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX 639 * field as being either an MSDU value or an index into the Flow 640 * Meter Instance table. 641 */ 642 if (sfi->maxsdu) { 643 sfi_config->msdu = 644 cpu_to_le16(sfi->maxsdu); 645 sfi_config->multi |= 0x40; 646 } 647 648 if (sfi->meter_id >= 0) { 649 sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id); 650 sfi_config->multi |= 0x80; 651 } 652 653 exit: 654 return enetc_send_cmd(priv->si, &cbd); 655 } 656 657 static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv, 658 u32 index, 659 struct psfp_streamfilter_counters *cnt) 660 { 661 struct enetc_cbd cbd = { .cmd = 2 }; 662 struct sfi_counter_data *data_buf; 663 dma_addr_t dma; 664 u16 data_size; 665 void *tmp; 666 int err; 667 668 cbd.index = cpu_to_le16((u16)index); 669 cbd.cmd = 2; 670 cbd.cls = BDCR_CMD_STREAM_FILTER; 671 cbd.status_flags = 0; 672 673 data_size = sizeof(struct sfi_counter_data); 674 675 tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size, 676 &dma, (void *)&data_buf); 677 if (!tmp) 678 return -ENOMEM; 679 680 err = enetc_send_cmd(priv->si, &cbd); 681 if (err) 682 goto exit; 683 684 cnt->matching_frames_count = ((u64)data_buf->matchh << 32) + 685 data_buf->matchl; 686 687 cnt->not_passing_sdu_count = ((u64)data_buf->msdu_droph << 32) + 688 data_buf->msdu_dropl; 689 690 cnt->passing_sdu_count = cnt->matching_frames_count 691 - cnt->not_passing_sdu_count; 692 693 cnt->not_passing_frames_count = 694 ((u64)data_buf->stream_gate_droph << 32) + 695 data_buf->stream_gate_dropl; 696 697 cnt->passing_frames_count = cnt->matching_frames_count - 698 cnt->not_passing_sdu_count - 699 cnt->not_passing_frames_count; 700 701 cnt->red_frames_count = ((u64)data_buf->flow_meter_droph << 32) + 702 data_buf->flow_meter_dropl; 703 704 exit: 705 enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma); 706 707 return err; 708 } 709 710 static u64 get_ptp_now(struct enetc_hw *hw) 711 { 712 u64 now_lo, now_hi, now; 713 714 now_lo = enetc_rd(hw, ENETC_SICTR0); 715 now_hi = enetc_rd(hw, ENETC_SICTR1); 716 now = now_lo | now_hi << 32; 717 718 return now; 719 } 720 721 static int get_start_ns(u64 now, u64 cycle, u64 *start) 722 { 723 u64 n; 724 725 if (!cycle) 726 return -EFAULT; 727 728 n = div64_u64(now, cycle); 729 730 *start = (n + 1) * cycle; 731 732 return 0; 733 } 734 735 /* Stream Gate Instance Set Descriptor */ 736 static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv, 737 struct enetc_psfp_gate *sgi, 738 u8 enable) 739 { 740 struct enetc_cbd cbd = { .cmd = 0 }; 741 struct sgi_table *sgi_config; 742 struct sgcl_conf *sgcl_config; 743 struct sgcl_data *sgcl_data; 744 struct sgce *sgce; 745 dma_addr_t dma; 746 u16 data_size; 747 int err, i; 748 void *tmp; 749 u64 now; 750 751 cbd.index = cpu_to_le16(sgi->index); 752 cbd.cmd = 0; 753 cbd.cls = BDCR_CMD_STREAM_GCL; 754 cbd.status_flags = 0x80; 755 756 /* disable */ 757 if (!enable) 758 return enetc_send_cmd(priv->si, &cbd); 759 760 if (!sgi->num_entries) 761 return 0; 762 763 if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist || 764 !sgi->cycletime) 765 return -EINVAL; 766 767 /* enable */ 768 sgi_config = &cbd.sgi_table; 769 770 /* Keep open before gate list start */ 771 sgi_config->ocgtst = 0x80; 772 773 sgi_config->oipv = (sgi->init_ipv < 0) ? 774 0x0 : ((sgi->init_ipv & 0x7) | 0x8); 775 776 sgi_config->en = 0x80; 777 778 /* Basic config */ 779 err = enetc_send_cmd(priv->si, &cbd); 780 if (err) 781 return -EINVAL; 782 783 memset(&cbd, 0, sizeof(cbd)); 784 785 cbd.index = cpu_to_le16(sgi->index); 786 cbd.cmd = 1; 787 cbd.cls = BDCR_CMD_STREAM_GCL; 788 cbd.status_flags = 0; 789 790 sgcl_config = &cbd.sgcl_conf; 791 792 sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3; 793 794 data_size = struct_size(sgcl_data, sgcl, sgi->num_entries); 795 tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size, 796 &dma, (void *)&sgcl_data); 797 if (!tmp) 798 return -ENOMEM; 799 800 sgce = &sgcl_data->sgcl[0]; 801 802 sgcl_config->agtst = 0x80; 803 804 sgcl_data->ct = sgi->cycletime; 805 sgcl_data->cte = sgi->cycletimext; 806 807 if (sgi->init_ipv >= 0) 808 sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8; 809 810 for (i = 0; i < sgi->num_entries; i++) { 811 struct action_gate_entry *from = &sgi->entries[i]; 812 struct sgce *to = &sgce[i]; 813 814 if (from->gate_state) 815 to->multi |= 0x10; 816 817 if (from->ipv >= 0) 818 to->multi |= ((from->ipv & 0x7) << 5) | 0x08; 819 820 if (from->maxoctets >= 0) { 821 to->multi |= 0x01; 822 to->msdu[0] = from->maxoctets & 0xFF; 823 to->msdu[1] = (from->maxoctets >> 8) & 0xFF; 824 to->msdu[2] = (from->maxoctets >> 16) & 0xFF; 825 } 826 827 to->interval = from->interval; 828 } 829 830 /* If basetime is less than now, calculate start time */ 831 now = get_ptp_now(&priv->si->hw); 832 833 if (sgi->basetime < now) { 834 u64 start; 835 836 err = get_start_ns(now, sgi->cycletime, &start); 837 if (err) 838 goto exit; 839 sgcl_data->btl = lower_32_bits(start); 840 sgcl_data->bth = upper_32_bits(start); 841 } else { 842 u32 hi, lo; 843 844 hi = upper_32_bits(sgi->basetime); 845 lo = lower_32_bits(sgi->basetime); 846 sgcl_data->bth = hi; 847 sgcl_data->btl = lo; 848 } 849 850 err = enetc_send_cmd(priv->si, &cbd); 851 852 exit: 853 enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma); 854 return err; 855 } 856 857 static int enetc_flowmeter_hw_set(struct enetc_ndev_priv *priv, 858 struct enetc_psfp_meter *fmi, 859 u8 enable) 860 { 861 struct enetc_cbd cbd = { .cmd = 0 }; 862 struct fmi_conf *fmi_config; 863 u64 temp = 0; 864 865 cbd.index = cpu_to_le16((u16)fmi->index); 866 cbd.cls = BDCR_CMD_FLOW_METER; 867 cbd.status_flags = 0x80; 868 869 if (!enable) 870 return enetc_send_cmd(priv->si, &cbd); 871 872 fmi_config = &cbd.fmi_conf; 873 fmi_config->en = 0x80; 874 875 if (fmi->cir) { 876 temp = (u64)8000 * fmi->cir; 877 temp = div_u64(temp, 3725); 878 } 879 880 fmi_config->cir = cpu_to_le32((u32)temp); 881 fmi_config->cbs = cpu_to_le32(fmi->cbs); 882 883 /* Default for eir ebs disable */ 884 fmi_config->eir = 0; 885 fmi_config->ebs = 0; 886 887 /* Default: 888 * mark red disable 889 * drop on yellow disable 890 * color mode disable 891 * couple flag disable 892 */ 893 fmi_config->conf = 0; 894 895 return enetc_send_cmd(priv->si, &cbd); 896 } 897 898 static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index) 899 { 900 struct enetc_stream_filter *f; 901 902 hlist_for_each_entry(f, &epsfp.stream_list, node) 903 if (f->sid.index == index) 904 return f; 905 906 return NULL; 907 } 908 909 static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index) 910 { 911 struct enetc_psfp_gate *g; 912 913 hlist_for_each_entry(g, &epsfp.psfp_gate_list, node) 914 if (g->index == index) 915 return g; 916 917 return NULL; 918 } 919 920 static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index) 921 { 922 struct enetc_psfp_filter *s; 923 924 hlist_for_each_entry(s, &epsfp.psfp_filter_list, node) 925 if (s->index == index) 926 return s; 927 928 return NULL; 929 } 930 931 static struct enetc_psfp_meter *enetc_get_meter_by_index(u32 index) 932 { 933 struct enetc_psfp_meter *m; 934 935 hlist_for_each_entry(m, &epsfp.psfp_meter_list, node) 936 if (m->index == index) 937 return m; 938 939 return NULL; 940 } 941 942 static struct enetc_psfp_filter 943 *enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi) 944 { 945 struct enetc_psfp_filter *s; 946 947 hlist_for_each_entry(s, &epsfp.psfp_filter_list, node) 948 if (s->gate_id == sfi->gate_id && 949 s->prio == sfi->prio && 950 s->maxsdu == sfi->maxsdu && 951 s->meter_id == sfi->meter_id) 952 return s; 953 954 return NULL; 955 } 956 957 static int enetc_get_free_index(struct enetc_ndev_priv *priv) 958 { 959 u32 max_size = priv->psfp_cap.max_psfp_filter; 960 unsigned long index; 961 962 index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size); 963 if (index == max_size) 964 return -1; 965 966 return index; 967 } 968 969 static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index) 970 { 971 struct enetc_psfp_filter *sfi; 972 u8 z; 973 974 sfi = enetc_get_filter_by_index(index); 975 WARN_ON(!sfi); 976 z = refcount_dec_and_test(&sfi->refcount); 977 978 if (z) { 979 enetc_streamfilter_hw_set(priv, sfi, false); 980 hlist_del(&sfi->node); 981 kfree(sfi); 982 clear_bit(index, epsfp.psfp_sfi_bitmap); 983 } 984 } 985 986 static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index) 987 { 988 struct enetc_psfp_gate *sgi; 989 u8 z; 990 991 sgi = enetc_get_gate_by_index(index); 992 WARN_ON(!sgi); 993 z = refcount_dec_and_test(&sgi->refcount); 994 if (z) { 995 enetc_streamgate_hw_set(priv, sgi, false); 996 hlist_del(&sgi->node); 997 kfree(sgi); 998 } 999 } 1000 1001 static void flow_meter_unref(struct enetc_ndev_priv *priv, u32 index) 1002 { 1003 struct enetc_psfp_meter *fmi; 1004 u8 z; 1005 1006 fmi = enetc_get_meter_by_index(index); 1007 WARN_ON(!fmi); 1008 z = refcount_dec_and_test(&fmi->refcount); 1009 if (z) { 1010 enetc_flowmeter_hw_set(priv, fmi, false); 1011 hlist_del(&fmi->node); 1012 kfree(fmi); 1013 } 1014 } 1015 1016 static void remove_one_chain(struct enetc_ndev_priv *priv, 1017 struct enetc_stream_filter *filter) 1018 { 1019 if (filter->flags & ENETC_PSFP_FLAGS_FMI) 1020 flow_meter_unref(priv, filter->fmi_index); 1021 1022 stream_gate_unref(priv, filter->sgi_index); 1023 stream_filter_unref(priv, filter->sfi_index); 1024 1025 hlist_del(&filter->node); 1026 kfree(filter); 1027 } 1028 1029 static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv, 1030 struct enetc_streamid *sid, 1031 struct enetc_psfp_filter *sfi, 1032 struct enetc_psfp_gate *sgi, 1033 struct enetc_psfp_meter *fmi) 1034 { 1035 int err; 1036 1037 err = enetc_streamid_hw_set(priv, sid, true); 1038 if (err) 1039 return err; 1040 1041 if (sfi) { 1042 err = enetc_streamfilter_hw_set(priv, sfi, true); 1043 if (err) 1044 goto revert_sid; 1045 } 1046 1047 err = enetc_streamgate_hw_set(priv, sgi, true); 1048 if (err) 1049 goto revert_sfi; 1050 1051 if (fmi) { 1052 err = enetc_flowmeter_hw_set(priv, fmi, true); 1053 if (err) 1054 goto revert_sgi; 1055 } 1056 1057 return 0; 1058 1059 revert_sgi: 1060 enetc_streamgate_hw_set(priv, sgi, false); 1061 revert_sfi: 1062 if (sfi) 1063 enetc_streamfilter_hw_set(priv, sfi, false); 1064 revert_sid: 1065 enetc_streamid_hw_set(priv, sid, false); 1066 return err; 1067 } 1068 1069 static struct actions_fwd *enetc_check_flow_actions(u64 acts, 1070 unsigned int inputkeys) 1071 { 1072 int i; 1073 1074 for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++) 1075 if (acts == enetc_act_fwd[i].actions && 1076 inputkeys & enetc_act_fwd[i].keys) 1077 return &enetc_act_fwd[i]; 1078 1079 return NULL; 1080 } 1081 1082 static int enetc_psfp_policer_validate(const struct flow_action *action, 1083 const struct flow_action_entry *act, 1084 struct netlink_ext_ack *extack) 1085 { 1086 if (act->police.exceed.act_id != FLOW_ACTION_DROP) { 1087 NL_SET_ERR_MSG_MOD(extack, 1088 "Offload not supported when exceed action is not drop"); 1089 return -EOPNOTSUPP; 1090 } 1091 1092 if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && 1093 act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { 1094 NL_SET_ERR_MSG_MOD(extack, 1095 "Offload not supported when conform action is not pipe or ok"); 1096 return -EOPNOTSUPP; 1097 } 1098 1099 if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && 1100 !flow_action_is_last_entry(action, act)) { 1101 NL_SET_ERR_MSG_MOD(extack, 1102 "Offload not supported when conform action is ok, but action is not last"); 1103 return -EOPNOTSUPP; 1104 } 1105 1106 if (act->police.peakrate_bytes_ps || 1107 act->police.avrate || act->police.overhead) { 1108 NL_SET_ERR_MSG_MOD(extack, 1109 "Offload not supported when peakrate/avrate/overhead is configured"); 1110 return -EOPNOTSUPP; 1111 } 1112 1113 if (act->police.rate_pkt_ps) { 1114 NL_SET_ERR_MSG_MOD(extack, 1115 "QoS offload not support packets per second"); 1116 return -EOPNOTSUPP; 1117 } 1118 1119 return 0; 1120 } 1121 1122 static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, 1123 struct flow_cls_offload *f) 1124 { 1125 struct flow_action_entry *entryg = NULL, *entryp = NULL; 1126 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 1127 struct netlink_ext_ack *extack = f->common.extack; 1128 struct enetc_stream_filter *filter, *old_filter; 1129 struct enetc_psfp_meter *fmi = NULL, *old_fmi; 1130 struct enetc_psfp_filter *sfi, *old_sfi; 1131 struct enetc_psfp_gate *sgi, *old_sgi; 1132 struct flow_action_entry *entry; 1133 struct action_gate_entry *e; 1134 u8 sfi_overwrite = 0; 1135 int entries_size; 1136 int i, err; 1137 1138 if (f->common.chain_index >= priv->psfp_cap.max_streamid) { 1139 NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!"); 1140 return -ENOSPC; 1141 } 1142 1143 flow_action_for_each(i, entry, &rule->action) 1144 if (entry->id == FLOW_ACTION_GATE) 1145 entryg = entry; 1146 else if (entry->id == FLOW_ACTION_POLICE) 1147 entryp = entry; 1148 1149 /* Not support without gate action */ 1150 if (!entryg) 1151 return -EINVAL; 1152 1153 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 1154 if (!filter) 1155 return -ENOMEM; 1156 1157 filter->sid.index = f->common.chain_index; 1158 1159 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 1160 struct flow_match_eth_addrs match; 1161 1162 flow_rule_match_eth_addrs(rule, &match); 1163 1164 if (!is_zero_ether_addr(match.mask->dst) && 1165 !is_zero_ether_addr(match.mask->src)) { 1166 NL_SET_ERR_MSG_MOD(extack, 1167 "Cannot match on both source and destination MAC"); 1168 err = -EINVAL; 1169 goto free_filter; 1170 } 1171 1172 if (!is_zero_ether_addr(match.mask->dst)) { 1173 if (!is_broadcast_ether_addr(match.mask->dst)) { 1174 NL_SET_ERR_MSG_MOD(extack, 1175 "Masked matching on destination MAC not supported"); 1176 err = -EINVAL; 1177 goto free_filter; 1178 } 1179 ether_addr_copy(filter->sid.dst_mac, match.key->dst); 1180 filter->sid.filtertype = STREAMID_TYPE_NULL; 1181 } 1182 1183 if (!is_zero_ether_addr(match.mask->src)) { 1184 if (!is_broadcast_ether_addr(match.mask->src)) { 1185 NL_SET_ERR_MSG_MOD(extack, 1186 "Masked matching on source MAC not supported"); 1187 err = -EINVAL; 1188 goto free_filter; 1189 } 1190 ether_addr_copy(filter->sid.src_mac, match.key->src); 1191 filter->sid.filtertype = STREAMID_TYPE_SMAC; 1192 } 1193 } else { 1194 NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS"); 1195 err = -EINVAL; 1196 goto free_filter; 1197 } 1198 1199 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 1200 struct flow_match_vlan match; 1201 1202 flow_rule_match_vlan(rule, &match); 1203 if (match.mask->vlan_priority) { 1204 if (match.mask->vlan_priority != 1205 (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) { 1206 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); 1207 err = -EINVAL; 1208 goto free_filter; 1209 } 1210 } 1211 1212 if (match.mask->vlan_id) { 1213 if (match.mask->vlan_id != VLAN_VID_MASK) { 1214 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id"); 1215 err = -EINVAL; 1216 goto free_filter; 1217 } 1218 1219 filter->sid.vid = match.key->vlan_id; 1220 if (!filter->sid.vid) 1221 filter->sid.tagged = STREAMID_VLAN_UNTAGGED; 1222 else 1223 filter->sid.tagged = STREAMID_VLAN_TAGGED; 1224 } 1225 } else { 1226 filter->sid.tagged = STREAMID_VLAN_ALL; 1227 } 1228 1229 /* parsing gate action */ 1230 if (entryg->hw_index >= priv->psfp_cap.max_psfp_gate) { 1231 NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!"); 1232 err = -ENOSPC; 1233 goto free_filter; 1234 } 1235 1236 if (entryg->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) { 1237 NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!"); 1238 err = -ENOSPC; 1239 goto free_filter; 1240 } 1241 1242 entries_size = struct_size(sgi, entries, entryg->gate.num_entries); 1243 sgi = kzalloc(entries_size, GFP_KERNEL); 1244 if (!sgi) { 1245 err = -ENOMEM; 1246 goto free_filter; 1247 } 1248 1249 refcount_set(&sgi->refcount, 1); 1250 sgi->index = entryg->hw_index; 1251 sgi->init_ipv = entryg->gate.prio; 1252 sgi->basetime = entryg->gate.basetime; 1253 sgi->cycletime = entryg->gate.cycletime; 1254 sgi->num_entries = entryg->gate.num_entries; 1255 1256 e = sgi->entries; 1257 for (i = 0; i < entryg->gate.num_entries; i++) { 1258 e[i].gate_state = entryg->gate.entries[i].gate_state; 1259 e[i].interval = entryg->gate.entries[i].interval; 1260 e[i].ipv = entryg->gate.entries[i].ipv; 1261 e[i].maxoctets = entryg->gate.entries[i].maxoctets; 1262 } 1263 1264 filter->sgi_index = sgi->index; 1265 1266 sfi = kzalloc(sizeof(*sfi), GFP_KERNEL); 1267 if (!sfi) { 1268 err = -ENOMEM; 1269 goto free_gate; 1270 } 1271 1272 refcount_set(&sfi->refcount, 1); 1273 sfi->gate_id = sgi->index; 1274 sfi->meter_id = ENETC_PSFP_WILDCARD; 1275 1276 /* Flow meter and max frame size */ 1277 if (entryp) { 1278 err = enetc_psfp_policer_validate(&rule->action, entryp, extack); 1279 if (err) 1280 goto free_sfi; 1281 1282 if (entryp->police.burst) { 1283 fmi = kzalloc(sizeof(*fmi), GFP_KERNEL); 1284 if (!fmi) { 1285 err = -ENOMEM; 1286 goto free_sfi; 1287 } 1288 refcount_set(&fmi->refcount, 1); 1289 fmi->cir = entryp->police.rate_bytes_ps; 1290 fmi->cbs = entryp->police.burst; 1291 fmi->index = entryp->hw_index; 1292 filter->flags |= ENETC_PSFP_FLAGS_FMI; 1293 filter->fmi_index = fmi->index; 1294 sfi->meter_id = fmi->index; 1295 } 1296 1297 if (entryp->police.mtu) 1298 sfi->maxsdu = entryp->police.mtu; 1299 } 1300 1301 /* prio ref the filter prio */ 1302 if (f->common.prio && f->common.prio <= BIT(3)) 1303 sfi->prio = f->common.prio - 1; 1304 else 1305 sfi->prio = ENETC_PSFP_WILDCARD; 1306 1307 old_sfi = enetc_psfp_check_sfi(sfi); 1308 if (!old_sfi) { 1309 int index; 1310 1311 index = enetc_get_free_index(priv); 1312 if (index < 0) { 1313 NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!"); 1314 err = -ENOSPC; 1315 goto free_fmi; 1316 } 1317 1318 sfi->index = index; 1319 sfi->handle = index + HANDLE_OFFSET; 1320 /* Update the stream filter handle also */ 1321 filter->sid.handle = sfi->handle; 1322 filter->sfi_index = sfi->index; 1323 sfi_overwrite = 0; 1324 } else { 1325 filter->sfi_index = old_sfi->index; 1326 filter->sid.handle = old_sfi->handle; 1327 sfi_overwrite = 1; 1328 } 1329 1330 err = enetc_psfp_hw_set(priv, &filter->sid, 1331 sfi_overwrite ? NULL : sfi, sgi, fmi); 1332 if (err) 1333 goto free_fmi; 1334 1335 spin_lock(&epsfp.psfp_lock); 1336 if (filter->flags & ENETC_PSFP_FLAGS_FMI) { 1337 old_fmi = enetc_get_meter_by_index(filter->fmi_index); 1338 if (old_fmi) { 1339 fmi->refcount = old_fmi->refcount; 1340 refcount_set(&fmi->refcount, 1341 refcount_read(&old_fmi->refcount) + 1); 1342 hlist_del(&old_fmi->node); 1343 kfree(old_fmi); 1344 } 1345 hlist_add_head(&fmi->node, &epsfp.psfp_meter_list); 1346 } 1347 1348 /* Remove the old node if exist and update with a new node */ 1349 old_sgi = enetc_get_gate_by_index(filter->sgi_index); 1350 if (old_sgi) { 1351 refcount_set(&sgi->refcount, 1352 refcount_read(&old_sgi->refcount) + 1); 1353 hlist_del(&old_sgi->node); 1354 kfree(old_sgi); 1355 } 1356 1357 hlist_add_head(&sgi->node, &epsfp.psfp_gate_list); 1358 1359 if (!old_sfi) { 1360 hlist_add_head(&sfi->node, &epsfp.psfp_filter_list); 1361 set_bit(sfi->index, epsfp.psfp_sfi_bitmap); 1362 } else { 1363 kfree(sfi); 1364 refcount_inc(&old_sfi->refcount); 1365 } 1366 1367 old_filter = enetc_get_stream_by_index(filter->sid.index); 1368 if (old_filter) 1369 remove_one_chain(priv, old_filter); 1370 1371 filter->stats.lastused = jiffies; 1372 hlist_add_head(&filter->node, &epsfp.stream_list); 1373 1374 spin_unlock(&epsfp.psfp_lock); 1375 1376 return 0; 1377 1378 free_fmi: 1379 kfree(fmi); 1380 free_sfi: 1381 kfree(sfi); 1382 free_gate: 1383 kfree(sgi); 1384 free_filter: 1385 kfree(filter); 1386 1387 return err; 1388 } 1389 1390 static int enetc_config_clsflower(struct enetc_ndev_priv *priv, 1391 struct flow_cls_offload *cls_flower) 1392 { 1393 struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower); 1394 struct netlink_ext_ack *extack = cls_flower->common.extack; 1395 struct flow_dissector *dissector = rule->match.dissector; 1396 struct flow_action *action = &rule->action; 1397 struct flow_action_entry *entry; 1398 struct actions_fwd *fwd; 1399 u64 actions = 0; 1400 int i, err; 1401 1402 if (!flow_action_has_entries(action)) { 1403 NL_SET_ERR_MSG_MOD(extack, "At least one action is needed"); 1404 return -EINVAL; 1405 } 1406 1407 flow_action_for_each(i, entry, action) 1408 actions |= BIT(entry->id); 1409 1410 fwd = enetc_check_flow_actions(actions, dissector->used_keys); 1411 if (!fwd) { 1412 NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!"); 1413 return -EOPNOTSUPP; 1414 } 1415 1416 if (fwd->output & FILTER_ACTION_TYPE_PSFP) { 1417 err = enetc_psfp_parse_clsflower(priv, cls_flower); 1418 if (err) { 1419 NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs"); 1420 return err; 1421 } 1422 } else { 1423 NL_SET_ERR_MSG_MOD(extack, "Unsupported actions"); 1424 return -EOPNOTSUPP; 1425 } 1426 1427 return 0; 1428 } 1429 1430 static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv, 1431 struct flow_cls_offload *f) 1432 { 1433 struct enetc_stream_filter *filter; 1434 struct netlink_ext_ack *extack = f->common.extack; 1435 int err; 1436 1437 if (f->common.chain_index >= priv->psfp_cap.max_streamid) { 1438 NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!"); 1439 return -ENOSPC; 1440 } 1441 1442 filter = enetc_get_stream_by_index(f->common.chain_index); 1443 if (!filter) 1444 return -EINVAL; 1445 1446 err = enetc_streamid_hw_set(priv, &filter->sid, false); 1447 if (err) 1448 return err; 1449 1450 remove_one_chain(priv, filter); 1451 1452 return 0; 1453 } 1454 1455 static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv, 1456 struct flow_cls_offload *f) 1457 { 1458 return enetc_psfp_destroy_clsflower(priv, f); 1459 } 1460 1461 static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv, 1462 struct flow_cls_offload *f) 1463 { 1464 struct psfp_streamfilter_counters counters = {}; 1465 struct enetc_stream_filter *filter; 1466 struct flow_stats stats = {}; 1467 int err; 1468 1469 filter = enetc_get_stream_by_index(f->common.chain_index); 1470 if (!filter) 1471 return -EINVAL; 1472 1473 err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters); 1474 if (err) 1475 return -EINVAL; 1476 1477 spin_lock(&epsfp.psfp_lock); 1478 stats.pkts = counters.matching_frames_count + 1479 counters.not_passing_sdu_count - 1480 filter->stats.pkts; 1481 stats.drops = counters.not_passing_frames_count + 1482 counters.not_passing_sdu_count + 1483 counters.red_frames_count - 1484 filter->stats.drops; 1485 stats.lastused = filter->stats.lastused; 1486 filter->stats.pkts += stats.pkts; 1487 filter->stats.drops += stats.drops; 1488 spin_unlock(&epsfp.psfp_lock); 1489 1490 flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops, 1491 stats.lastused, FLOW_ACTION_HW_STATS_DELAYED); 1492 1493 return 0; 1494 } 1495 1496 static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv, 1497 struct flow_cls_offload *cls_flower) 1498 { 1499 switch (cls_flower->command) { 1500 case FLOW_CLS_REPLACE: 1501 return enetc_config_clsflower(priv, cls_flower); 1502 case FLOW_CLS_DESTROY: 1503 return enetc_destroy_clsflower(priv, cls_flower); 1504 case FLOW_CLS_STATS: 1505 return enetc_psfp_get_stats(priv, cls_flower); 1506 default: 1507 return -EOPNOTSUPP; 1508 } 1509 } 1510 1511 static inline void clean_psfp_sfi_bitmap(void) 1512 { 1513 bitmap_free(epsfp.psfp_sfi_bitmap); 1514 epsfp.psfp_sfi_bitmap = NULL; 1515 } 1516 1517 static void clean_stream_list(void) 1518 { 1519 struct enetc_stream_filter *s; 1520 struct hlist_node *tmp; 1521 1522 hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) { 1523 hlist_del(&s->node); 1524 kfree(s); 1525 } 1526 } 1527 1528 static void clean_sfi_list(void) 1529 { 1530 struct enetc_psfp_filter *sfi; 1531 struct hlist_node *tmp; 1532 1533 hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) { 1534 hlist_del(&sfi->node); 1535 kfree(sfi); 1536 } 1537 } 1538 1539 static void clean_sgi_list(void) 1540 { 1541 struct enetc_psfp_gate *sgi; 1542 struct hlist_node *tmp; 1543 1544 hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) { 1545 hlist_del(&sgi->node); 1546 kfree(sgi); 1547 } 1548 } 1549 1550 static void clean_psfp_all(void) 1551 { 1552 /* Disable all list nodes and free all memory */ 1553 clean_sfi_list(); 1554 clean_sgi_list(); 1555 clean_stream_list(); 1556 epsfp.dev_bitmap = 0; 1557 clean_psfp_sfi_bitmap(); 1558 } 1559 1560 int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 1561 void *cb_priv) 1562 { 1563 struct net_device *ndev = cb_priv; 1564 1565 if (!tc_can_offload(ndev)) 1566 return -EOPNOTSUPP; 1567 1568 switch (type) { 1569 case TC_SETUP_CLSFLOWER: 1570 return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data); 1571 default: 1572 return -EOPNOTSUPP; 1573 } 1574 } 1575 1576 int enetc_set_psfp(struct net_device *ndev, bool en) 1577 { 1578 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1579 int err; 1580 1581 if (en) { 1582 err = enetc_psfp_enable(priv); 1583 if (err) 1584 return err; 1585 1586 priv->active_offloads |= ENETC_F_QCI; 1587 return 0; 1588 } 1589 1590 err = enetc_psfp_disable(priv); 1591 if (err) 1592 return err; 1593 1594 priv->active_offloads &= ~ENETC_F_QCI; 1595 1596 return 0; 1597 } 1598 1599 int enetc_psfp_init(struct enetc_ndev_priv *priv) 1600 { 1601 if (epsfp.psfp_sfi_bitmap) 1602 return 0; 1603 1604 epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter, 1605 GFP_KERNEL); 1606 if (!epsfp.psfp_sfi_bitmap) 1607 return -ENOMEM; 1608 1609 spin_lock_init(&epsfp.psfp_lock); 1610 1611 if (list_empty(&enetc_block_cb_list)) 1612 epsfp.dev_bitmap = 0; 1613 1614 return 0; 1615 } 1616 1617 int enetc_psfp_clean(struct enetc_ndev_priv *priv) 1618 { 1619 if (!list_empty(&enetc_block_cb_list)) 1620 return -EBUSY; 1621 1622 clean_psfp_all(); 1623 1624 return 0; 1625 } 1626 1627 int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data) 1628 { 1629 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1630 struct flow_block_offload *f = type_data; 1631 int port, err; 1632 1633 err = flow_block_cb_setup_simple(f, &enetc_block_cb_list, 1634 enetc_setup_tc_block_cb, 1635 ndev, ndev, true); 1636 if (err) 1637 return err; 1638 1639 switch (f->command) { 1640 case FLOW_BLOCK_BIND: 1641 port = enetc_pf_to_port(priv->si->pdev); 1642 if (port < 0) 1643 return -EINVAL; 1644 1645 set_bit(port, &epsfp.dev_bitmap); 1646 break; 1647 case FLOW_BLOCK_UNBIND: 1648 port = enetc_pf_to_port(priv->si->pdev); 1649 if (port < 0) 1650 return -EINVAL; 1651 1652 clear_bit(port, &epsfp.dev_bitmap); 1653 if (!epsfp.dev_bitmap) 1654 clean_psfp_all(); 1655 break; 1656 } 1657 1658 return 0; 1659 } 1660 1661 int enetc_qos_query_caps(struct net_device *ndev, void *type_data) 1662 { 1663 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1664 struct tc_query_caps_base *base = type_data; 1665 struct enetc_si *si = priv->si; 1666 1667 switch (base->type) { 1668 case TC_SETUP_QDISC_MQPRIO: { 1669 struct tc_mqprio_caps *caps = base->caps; 1670 1671 caps->validate_queue_counts = true; 1672 1673 return 0; 1674 } 1675 case TC_SETUP_QDISC_TAPRIO: { 1676 struct tc_taprio_caps *caps = base->caps; 1677 1678 if (si->hw_features & ENETC_SI_F_QBV) 1679 caps->supports_queue_max_sdu = true; 1680 1681 return 0; 1682 } 1683 default: 1684 return -EOPNOTSUPP; 1685 } 1686 } 1687