1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 2 /* Copyright 2019 NXP */ 3 4 #include "enetc.h" 5 6 #include <net/pkt_sched.h> 7 #include <linux/math64.h> 8 #include <linux/refcount.h> 9 #include <net/pkt_cls.h> 10 #include <net/tc_act/tc_gate.h> 11 12 static u16 enetc_get_max_gcl_len(struct enetc_hw *hw) 13 { 14 return enetc_rd(hw, ENETC_PTGCAPR) & ENETC_PTGCAPR_MAX_GCL_LEN_MASK; 15 } 16 17 void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed) 18 { 19 struct enetc_hw *hw = &priv->si->hw; 20 u32 old_speed = priv->speed; 21 u32 pspeed, tmp; 22 23 if (speed == old_speed) 24 return; 25 26 switch (speed) { 27 case SPEED_1000: 28 pspeed = ENETC_PMR_PSPEED_1000M; 29 break; 30 case SPEED_2500: 31 pspeed = ENETC_PMR_PSPEED_2500M; 32 break; 33 case SPEED_100: 34 pspeed = ENETC_PMR_PSPEED_100M; 35 break; 36 case SPEED_10: 37 default: 38 pspeed = ENETC_PMR_PSPEED_10M; 39 } 40 41 priv->speed = speed; 42 tmp = enetc_port_rd(hw, ENETC_PMR); 43 enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed); 44 } 45 46 static int enetc_setup_taprio(struct net_device *ndev, 47 struct tc_taprio_qopt_offload *admin_conf) 48 { 49 struct enetc_ndev_priv *priv = netdev_priv(ndev); 50 struct enetc_hw *hw = &priv->si->hw; 51 struct enetc_cbd cbd = {.cmd = 0}; 52 struct tgs_gcl_conf *gcl_config; 53 struct tgs_gcl_data *gcl_data; 54 dma_addr_t dma; 55 struct gce *gce; 56 u16 data_size; 57 u16 gcl_len; 58 void *tmp; 59 u32 tge; 60 int err; 61 int i; 62 63 if (admin_conf->num_entries > enetc_get_max_gcl_len(hw)) 64 return -EINVAL; 65 gcl_len = admin_conf->num_entries; 66 67 tge = enetc_rd(hw, ENETC_PTGCR); 68 if (!admin_conf->enable) { 69 enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE); 70 enetc_reset_ptcmsdur(hw); 71 72 priv->active_offloads &= ~ENETC_F_QBV; 73 74 return 0; 75 } 76 77 if (admin_conf->cycle_time > U32_MAX || 78 admin_conf->cycle_time_extension > U32_MAX) 79 return -EINVAL; 80 81 /* Configure the (administrative) gate control list using the 82 
* control BD descriptor. 83 */ 84 gcl_config = &cbd.gcl_conf; 85 86 data_size = struct_size(gcl_data, entry, gcl_len); 87 tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size, 88 &dma, (void *)&gcl_data); 89 if (!tmp) 90 return -ENOMEM; 91 92 gce = (struct gce *)(gcl_data + 1); 93 94 /* Set all gates open as default */ 95 gcl_config->atc = 0xff; 96 gcl_config->acl_len = cpu_to_le16(gcl_len); 97 98 gcl_data->btl = cpu_to_le32(lower_32_bits(admin_conf->base_time)); 99 gcl_data->bth = cpu_to_le32(upper_32_bits(admin_conf->base_time)); 100 gcl_data->ct = cpu_to_le32(admin_conf->cycle_time); 101 gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension); 102 103 for (i = 0; i < gcl_len; i++) { 104 struct tc_taprio_sched_entry *temp_entry; 105 struct gce *temp_gce = gce + i; 106 107 temp_entry = &admin_conf->entries[i]; 108 109 temp_gce->gate = (u8)temp_entry->gate_mask; 110 temp_gce->period = cpu_to_le32(temp_entry->interval); 111 } 112 113 cbd.status_flags = 0; 114 115 cbd.cls = BDCR_CMD_PORT_GCL; 116 cbd.status_flags = 0; 117 118 enetc_wr(hw, ENETC_PTGCR, tge | ENETC_PTGCR_TGE); 119 120 err = enetc_send_cmd(priv->si, &cbd); 121 if (err) 122 enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE); 123 124 enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma); 125 126 if (err) 127 return err; 128 129 enetc_set_ptcmsdur(hw, admin_conf->max_sdu); 130 priv->active_offloads |= ENETC_F_QBV; 131 132 return 0; 133 } 134 135 int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data) 136 { 137 struct tc_taprio_qopt_offload *taprio = type_data; 138 struct enetc_ndev_priv *priv = netdev_priv(ndev); 139 struct enetc_hw *hw = &priv->si->hw; 140 int err; 141 int i; 142 143 /* TSD and Qbv are mutually exclusive in hardware */ 144 for (i = 0; i < priv->num_tx_rings; i++) 145 if (priv->tx_ring[i]->tsd_enable) 146 return -EBUSY; 147 148 for (i = 0; i < priv->num_tx_rings; i++) 149 enetc_set_bdr_prio(hw, priv->tx_ring[i]->index, 150 taprio->enable ? 
				   i : 0);

	err = enetc_setup_taprio(ndev, taprio);

	if (err)
		/* Roll the per-ring priority mapping back on failure */
		for (i = 0; i < priv->num_tx_rings; i++)
			enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
					   taprio->enable ? 0 : i);

	return err;
}

/* Non-zero when CBS is enabled for traffic class @tc */
static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc)
{
	return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE;
}

/* Currently configured CBS bandwidth percentage for traffic class @tc */
static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc)
{
	return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK;
}

/* Configure 802.1Qav credit-based shaping for one traffic class.
 * Only the two highest-priority classes may be shaped; they must be
 * enabled top-down and disabled bottom-up so the hiCredit calculation
 * for the lower class can rely on the upper class's settings.
 */
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_cbs_qopt_offload *cbs = type_data;
	u32 port_transmit_rate = priv->speed;
	u8 tc_nums = netdev_get_num_tc(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	u32 hi_credit_bit, hi_credit_reg;
	u32 max_interference_size;
	u32 port_frame_max_size;
	u8 tc = cbs->queue;
	u8 prio_top, prio_next;
	int bw_sum = 0;
	u8 bw;

	prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1);
	prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2);

	/* Support highest prio and second prio tc in cbs mode */
	if (tc != prio_top && tc != prio_next)
		return -EOPNOTSUPP;

	if (!cbs->enable) {
		/* Make sure the other TC that are numerically
		 * lower than this TC have been disabled.
		 */
		if (tc == prio_top &&
		    enetc_get_cbs_enable(hw, prio_next)) {
			dev_err(&ndev->dev,
				"Disable TC%d before disable TC%d\n",
				prio_next, tc);
			return -EINVAL;
		}

		enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0);
		enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0);

		return 0;
	}

	/* idleSlope - sendSlope must equal the port rate (in kbps);
	 * slopes outside their sign convention are rejected.
	 */
	if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L ||
	    cbs->idleslope < 0 || cbs->sendslope > 0)
		return -EOPNOTSUPP;

	port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	/* Bandwidth as an integer percentage of the port rate */
	bw = cbs->idleslope / (port_transmit_rate * 10UL);

	/* Make sure the other TC that are numerically
	 * higher than this TC have been enabled.
	 */
	if (tc == prio_next) {
		if (!enetc_get_cbs_enable(hw, prio_top)) {
			dev_err(&ndev->dev,
				"Enable TC%d first before enable TC%d\n",
				prio_top, prio_next);
			return -EINVAL;
		}
		bw_sum += enetc_get_cbs_bw(hw, prio_top);
	}

	if (bw_sum + bw >= 100) {
		dev_err(&ndev->dev,
			"The sum of all CBS Bandwidth can't exceed 100\n");
		return -EINVAL;
	}

	/* NOTE(review): the result of this read is discarded — presumably
	 * kept for a hardware read side effect or left over from an earlier
	 * revision; confirm before removing.
	 */
	enetc_port_rd(hw, ENETC_PTCMSDUR(tc));

	/* For top prio TC, the max_interfrence_size is maxSizedFrame.
	 *
	 * For next prio TC, the max_interfrence_size is calculated as below:
	 *
	 *      max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra)
	 *
	 *	- RA: idleSlope for AVB Class A
	 *	- R0: port transmit rate
	 *	- M0: maximum sized frame for the port
	 *	- MA: maximum sized frame for AVB Class A
	 */

	if (tc == prio_top) {
		max_interference_size = port_frame_max_size * 8;
	} else {
		u32 m0, ma, r0, ra;

		m0 = port_frame_max_size * 8;
		ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8;
		ra = enetc_get_cbs_bw(hw, prio_top) *
			port_transmit_rate * 10000ULL;
		r0 = port_transmit_rate * 1000000ULL;
		max_interference_size = m0 + ma +
			(u32)div_u64((u64)ra * m0, r0 - ra);
	}

	/* hiCredit bits calculate by:
	 *
	 * maxSizedFrame * (idleSlope/portTxRate)
	 */
	hi_credit_bit = max_interference_size * bw / 100;

	/* hiCredit bits to hiCredit register need to calculated as:
	 *
	 * (enetClockFrequency / portTransmitRate) * 100
	 */
	hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
				     port_transmit_rate * 1000000ULL);

	enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);

	/* Set bw register and enable this traffic class */
	enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);

	return 0;
}

/* Enable/disable time-specified departure (ETF offload) on one TX ring.
 * Mutually exclusive with Qbv time gating.
 */
int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_etf_qopt_offload *qopt = type_data;
	u8 tc_nums = netdev_get_num_tc(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	int tc;

	if (!tc_nums)
		return -EOPNOTSUPP;

	tc = qopt->queue;

	if (tc < 0 || tc >= priv->num_tx_rings)
		return -EINVAL;

	/* TSD and Qbv are mutually exclusive in hardware */
	if (enetc_rd(hw, ENETC_PTGCR) & ENETC_PTGCR_TGE)
		return -EBUSY;

	priv->tx_ring[tc]->tsd_enable = qopt->enable;
	enetc_port_wr(hw, ENETC_PTCTSDR(tc),
		      qopt->enable ? ENETC_TSDE : 0);

	return 0;
}

/* 802.1CB/Qci stream identification method (Null = DMAC+VID, SMAC) */
enum streamid_type {
	STREAMID_TYPE_RESERVED = 0,
	STREAMID_TYPE_NULL,
	STREAMID_TYPE_SMAC,
};

/* VLAN-tagged matching mode carried in the stream identity entry */
enum streamid_vlan_tagged {
	STREAMID_VLAN_RESERVED = 0,
	STREAMID_VLAN_TAGGED,
	STREAMID_VLAN_UNTAGGED,
	STREAMID_VLAN_ALL,
};

#define ENETC_PSFP_WILDCARD -1
#define HANDLE_OFFSET 100

enum forward_type {
	FILTER_ACTION_TYPE_PSFP = BIT(0),
	FILTER_ACTION_TYPE_ACL = BIT(1),
	FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0),
};

/* This is for limit output type for input actions */
struct actions_fwd {
	u64 actions;
	u64 keys;	/* include the must needed keys */
	enum forward_type output;
};

/* Counters read back from a stream filter instance */
struct psfp_streamfilter_counters {
	u64 matching_frames_count;
	u64 passing_frames_count;
	u64 not_passing_frames_count;
	u64 passing_sdu_count;
	u64 not_passing_sdu_count;
	u64 red_frames_count;
};

/* Software image of one stream identity table entry */
struct enetc_streamid {
	u32 index;
	union {
		u8 src_mac[6];
		u8 dst_mac[6];
	};
	u8 filtertype;		/* enum streamid_type */
	u16 vid;
	u8 tagged;		/* enum streamid_vlan_tagged */
	s32 handle;
};

/* Software image of one stream filter instance; shared between rules
 * with identical parameters, hence the refcount.
 */
struct enetc_psfp_filter {
	u32 index;
	s32 handle;
	s8 prio;
	u32 maxsdu;
	u32 gate_id;
	s32 meter_id;
	refcount_t refcount;
	struct hlist_node node;
};

/* Software image of one stream gate instance plus its gate list */
struct enetc_psfp_gate {
	u32 index;
	s8 init_ipv;
	u64 basetime;
	u64 cycletime;
	u64 cycletimext;
	u32 num_entries;
	refcount_t refcount;
	struct hlist_node node;
	struct action_gate_entry entries[];
};

/* Only enable the green color frame now
 * Will add eir and ebs color blind, couple flag etc when
 * policing action add more offloading parameters
 */
struct enetc_psfp_meter {
	u32 index;
	u32 cir;
	u32 cbs;
	refcount_t refcount;
	struct hlist_node node;
};

#define ENETC_PSFP_FLAGS_FMI BIT(0)

/* One offloaded cls_flower rule: stream id plus references into the
 * filter/gate/meter lists.
 */
struct enetc_stream_filter {
	struct enetc_streamid sid;
	u32 sfi_index;
	u32 sgi_index;
	u32 flags;
	u32 fmi_index;
	struct flow_stats stats;
	struct hlist_node node;
};

/* Global PSFP state shared across ports */
struct enetc_psfp {
	unsigned long dev_bitmap;
	unsigned long *psfp_sfi_bitmap;
	struct hlist_head stream_list;
	struct hlist_head psfp_filter_list;
	struct hlist_head psfp_gate_list;
	struct hlist_head psfp_meter_list;
	spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */
};

/* Supported action/key combinations and where each is dispatched */
static struct actions_fwd enetc_act_fwd[] = {
	{
		BIT(FLOW_ACTION_GATE),
		BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
		FILTER_ACTION_TYPE_PSFP
	},
	{
		BIT(FLOW_ACTION_POLICE) |
		BIT(FLOW_ACTION_GATE),
		BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
		FILTER_ACTION_TYPE_PSFP
	},
	/* example for ACL actions */
	{
		BIT(FLOW_ACTION_DROP),
		0,
		FILTER_ACTION_TYPE_ACL
	}
};

static struct enetc_psfp epsfp = {
	.dev_bitmap = 0,
	.psfp_sfi_bitmap = NULL,
};

static LIST_HEAD(enetc_block_cb_list);

/* Stream Identity Entry Set Descriptor */
static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
				 struct enetc_streamid *sid,
				 u8 enable)
{
	struct enetc_cbd cbd = {.cmd = 0};
	struct streamid_data *si_data;
	struct streamid_conf *si_conf;
	dma_addr_t dma;
	u16 data_size;
	void *tmp;
	int port;
	int err;

	port = enetc_pf_to_port(priv->si->pdev);
	if (port < 0)
		return -EINVAL;

	if (sid->index >= priv->psfp_cap.max_streamid)
		return -EINVAL;

	if (sid->filtertype != STREAMID_TYPE_NULL &&
	    sid->filtertype != STREAMID_TYPE_SMAC)
		return -EOPNOTSUPP;

	/* Disable operation before enable */
	cbd.index = cpu_to_le16((u16)sid->index);
	cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
	cbd.status_flags = 0;

	data_size = sizeof(struct streamid_data);
	tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
				       &dma, (void *)&si_data);
	if (!tmp)
		return -ENOMEM;

	/* Match-all entry used for the initial disable pass */
	eth_broadcast_addr(si_data->dmac);
	si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK
			       + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));

	si_conf = &cbd.sid_set;
	/* Only one port supported for one entry, set itself */
	si_conf->iports = cpu_to_le32(1 << port);
	si_conf->id_type = 1;
	si_conf->oui[2] = 0x0;
	si_conf->oui[1] = 0x80;
	si_conf->oui[0] = 0xC2;

	err = enetc_send_cmd(priv->si, &cbd);
	if (err)
		goto out;

	if (!enable)
		goto out;

	/* Enable the entry overwrite again in case space flushed by hardware */
	cbd.status_flags = 0;

	si_conf->en = 0x80;
	si_conf->stream_handle = cpu_to_le32(sid->handle);
	si_conf->iports = cpu_to_le32(1 << port);
	si_conf->id_type = sid->filtertype;
	si_conf->oui[2] = 0x0;
	si_conf->oui[1] = 0x80;
	si_conf->oui[0] = 0xC2;

	memset(si_data, 0, data_size);

	/* VIDM default to be 1.
	 * VID Match. If set (b1) then the VID must match, otherwise
	 * any VID is considered a match. VIDM setting is only used
	 * when TG is set to b01.
	 */
	if (si_conf->id_type == STREAMID_TYPE_NULL) {
		ether_addr_copy(si_data->dmac, sid->dst_mac);
		si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
				       ((((u16)(sid->tagged) & 0x3) << 14)
					| ENETC_CBDR_SID_VIDM);
	} else if (si_conf->id_type == STREAMID_TYPE_SMAC) {
		ether_addr_copy(si_data->smac, sid->src_mac);
		si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
				       ((((u16)(sid->tagged) & 0x3) << 14)
					| ENETC_CBDR_SID_VIDM);
	}

	err = enetc_send_cmd(priv->si, &cbd);
out:
	enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);

	return err;
}

/* Stream Filter Instance Set Descriptor */
static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
				     struct enetc_psfp_filter *sfi,
				     u8 enable)
{
	struct enetc_cbd cbd = {.cmd = 0};
	struct sfi_conf *sfi_config;
	int port;

	port = enetc_pf_to_port(priv->si->pdev);
	if (port < 0)
		return -EINVAL;

	cbd.index = cpu_to_le16(sfi->index);
	cbd.cls = BDCR_CMD_STREAM_FILTER;
	cbd.status_flags = 0x80;
	cbd.length = cpu_to_le16(1);

	sfi_config = &cbd.sfi_conf;
	/* A disable request only needs index/class; skip the config */
	if (!enable)
		goto exit;

	sfi_config->en = 0x80;

	if (sfi->handle >= 0) {
		sfi_config->stream_handle =
			cpu_to_le32(sfi->handle);
		sfi_config->sthm |= 0x80;
	}

	sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
	sfi_config->input_ports = cpu_to_le32(1 << port);

	/* The priority value which may be matched against the
	 * frame's priority value to determine a match for this entry.
	 */
	if (sfi->prio >= 0)
		sfi_config->multi |= (sfi->prio & 0x7) | 0x8;

	/* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
	 * field as being either an MSDU value or an index into the Flow
	 * Meter Instance table.
	 */
	if (sfi->maxsdu) {
		sfi_config->msdu =
			cpu_to_le16(sfi->maxsdu);
		sfi_config->multi |= 0x40;
	}

	if (sfi->meter_id >= 0) {
		sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id);
		sfi_config->multi |= 0x80;
	}

exit:
	return enetc_send_cmd(priv->si, &cbd);
}

/* Query the hardware counters of stream filter @index and derive the
 * 802.1Qci counter set from the raw drop/match counts.
 */
static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
				      u32 index,
				      struct psfp_streamfilter_counters *cnt)
{
	struct enetc_cbd cbd = { .cmd = 2 };
	struct sfi_counter_data *data_buf;
	dma_addr_t dma;
	u16 data_size;
	void *tmp;
	int err;

	cbd.index = cpu_to_le16((u16)index);
	cbd.cmd = 2;
	cbd.cls = BDCR_CMD_STREAM_FILTER;
	cbd.status_flags = 0;

	data_size = sizeof(struct sfi_counter_data);

	tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
				       &dma, (void *)&data_buf);
	if (!tmp)
		return -ENOMEM;

	err = enetc_send_cmd(priv->si, &cbd);
	if (err)
		goto exit;

	/* Each counter is split into high/low 32-bit halves */
	cnt->matching_frames_count = ((u64)data_buf->matchh << 32) +
				     data_buf->matchl;

	cnt->not_passing_sdu_count = ((u64)data_buf->msdu_droph << 32) +
				     data_buf->msdu_dropl;

	cnt->passing_sdu_count = cnt->matching_frames_count
				- cnt->not_passing_sdu_count;

	cnt->not_passing_frames_count =
		((u64)data_buf->stream_gate_droph << 32) +
		data_buf->stream_gate_dropl;

	cnt->passing_frames_count = cnt->matching_frames_count -
				    cnt->not_passing_sdu_count -
				    cnt->not_passing_frames_count;

	cnt->red_frames_count = ((u64)data_buf->flow_meter_droph << 32) +
				data_buf->flow_meter_dropl;

exit:
	enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);

	return err;
}

/* Read the current 64-bit PTP time from the two SI counter registers */
static u64 get_ptp_now(struct enetc_hw *hw)
{
	u64 now_lo, now_hi, now;

	now_lo = enetc_rd(hw, ENETC_SICTR0);
	now_hi = enetc_rd(hw, ENETC_SICTR1);
	now = now_lo | now_hi << 32;

	return now;
}

/* Compute the first cycle boundary strictly after @now; fails when
 * @cycle is zero (division would be undefined).
 */
static int get_start_ns(u64 now, u64 cycle, u64 *start)
{
	u64 n;

	if (!cycle)
		return -EFAULT;

	n = div64_u64(now, cycle);

	*start = (n + 1) * cycle;

	return 0;
}

/* Stream Gate Instance Set Descriptor */
static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
				   struct enetc_psfp_gate *sgi,
				   u8 enable)
{
	struct enetc_cbd cbd = { .cmd = 0 };
	struct sgi_table *sgi_config;
	struct sgcl_conf *sgcl_config;
	struct sgcl_data *sgcl_data;
	struct sgce *sgce;
	dma_addr_t dma;
	u16 data_size;
	int err, i;
	void *tmp;
	u64 now;

	cbd.index = cpu_to_le16(sgi->index);
	cbd.cmd = 0;
	cbd.cls = BDCR_CMD_STREAM_GCL;
	cbd.status_flags = 0x80;

	/* disable */
	if (!enable)
		return enetc_send_cmd(priv->si, &cbd);

	if (!sgi->num_entries)
		return 0;

	if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist ||
	    !sgi->cycletime)
		return -EINVAL;

	/* enable */
	sgi_config = &cbd.sgi_table;

	/* Keep open before gate list start */
	sgi_config->ocgtst = 0x80;

	sgi_config->oipv = (sgi->init_ipv < 0) ?
			   0x0 : ((sgi->init_ipv & 0x7) | 0x8);

	sgi_config->en = 0x80;

	/* Basic config */
	err = enetc_send_cmd(priv->si, &cbd);
	if (err)
		return -EINVAL;

	/* Second command: program the gate control list itself */
	memset(&cbd, 0, sizeof(cbd));

	cbd.index = cpu_to_le16(sgi->index);
	cbd.cmd = 1;
	cbd.cls = BDCR_CMD_STREAM_GCL;
	cbd.status_flags = 0;

	sgcl_config = &cbd.sgcl_conf;

	sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;

	data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
	tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
				       &dma, (void *)&sgcl_data);
	if (!tmp)
		return -ENOMEM;

	sgce = &sgcl_data->sgcl[0];

	sgcl_config->agtst = 0x80;

	sgcl_data->ct = sgi->cycletime;
	sgcl_data->cte = sgi->cycletimext;

	if (sgi->init_ipv >= 0)
		sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8;

	/* Translate each software gate entry into the hardware layout */
	for (i = 0; i < sgi->num_entries; i++) {
		struct action_gate_entry *from = &sgi->entries[i];
		struct sgce *to = &sgce[i];

		if (from->gate_state)
			to->multi |= 0x10;

		if (from->ipv >= 0)
			to->multi |= ((from->ipv & 0x7) << 5) | 0x08;

		if (from->maxoctets >= 0) {
			to->multi |= 0x01;
			to->msdu[0] = from->maxoctets & 0xFF;
			to->msdu[1] = (from->maxoctets >> 8) & 0xFF;
			to->msdu[2] = (from->maxoctets >> 16) & 0xFF;
		}

		to->interval = from->interval;
	}

	/* If basetime is less than now, calculate start time */
	now = get_ptp_now(&priv->si->hw);

	if (sgi->basetime < now) {
		u64 start;

		err = get_start_ns(now, sgi->cycletime, &start);
		if (err)
			goto exit;
		sgcl_data->btl = lower_32_bits(start);
		sgcl_data->bth = upper_32_bits(start);
	} else {
		u32 hi, lo;

		hi = upper_32_bits(sgi->basetime);
		lo = lower_32_bits(sgi->basetime);
		sgcl_data->bth = hi;
		sgcl_data->btl = lo;
	}

	err = enetc_send_cmd(priv->si, &cbd);

exit:
	enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);

	return err;
}

/* Flow Meter Instance Set Descriptor: program (or disable) one flow
 * meter; CIR is converted from bytes/s into the hardware's 3.725 Kbps
 * units.
 */
static int enetc_flowmeter_hw_set(struct enetc_ndev_priv *priv,
				  struct enetc_psfp_meter *fmi,
				  u8 enable)
{
	struct enetc_cbd cbd = { .cmd = 0 };
	struct fmi_conf *fmi_config;
	u64 temp = 0;

	cbd.index = cpu_to_le16((u16)fmi->index);
	cbd.cls = BDCR_CMD_FLOW_METER;
	cbd.status_flags = 0x80;

	if (!enable)
		return enetc_send_cmd(priv->si, &cbd);

	fmi_config = &cbd.fmi_conf;
	fmi_config->en = 0x80;

	if (fmi->cir) {
		temp = (u64)8000 * fmi->cir;
		temp = div_u64(temp, 3725);
	}

	fmi_config->cir = cpu_to_le32((u32)temp);
	fmi_config->cbs = cpu_to_le32(fmi->cbs);

	/* Default for eir ebs disable */
	fmi_config->eir = 0;
	fmi_config->ebs = 0;

	/* Default:
	 * mark red disable
	 * drop on yellow disable
	 * color mode disable
	 * couple flag disable
	 */
	fmi_config->conf = 0;

	return enetc_send_cmd(priv->si, &cbd);
}

/* Linear lookup of a stream filter rule by stream id index */
static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index)
{
	struct enetc_stream_filter *f;

	hlist_for_each_entry(f, &epsfp.stream_list, node)
		if (f->sid.index == index)
			return f;

	return NULL;
}

/* Linear lookup of a stream gate instance by index */
static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index)
{
	struct enetc_psfp_gate *g;

	hlist_for_each_entry(g, &epsfp.psfp_gate_list, node)
		if (g->index == index)
			return g;

	return NULL;
}

/* Linear lookup of a stream filter instance by index */
static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index)
{
	struct enetc_psfp_filter *s;

	hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
		if (s->index == index)
			return s;

	return NULL;
}

/* Linear lookup of a flow meter instance by index */
static struct enetc_psfp_meter *enetc_get_meter_by_index(u32 index)
{
	struct enetc_psfp_meter *m;

	hlist_for_each_entry(m, &epsfp.psfp_meter_list, node)
		if (m->index == index)
			return m;

	return NULL;
}

static struct enetc_psfp_filter
/* Find an existing stream filter instance with identical parameters so
 * it can be shared (refcounted) instead of allocating a new one.
 */
*enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi)
{
	struct enetc_psfp_filter *s;

	hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
		if (s->gate_id == sfi->gate_id &&
		    s->prio == sfi->prio &&
		    s->maxsdu == sfi->maxsdu &&
		    s->meter_id == sfi->meter_id)
			return s;

	return NULL;
}

/* Return the first unused stream filter index, or -1 when the bitmap
 * is full.
 */
static int enetc_get_free_index(struct enetc_ndev_priv *priv)
{
	u32 max_size = priv->psfp_cap.max_psfp_filter;
	unsigned long index;

	index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size);
	if (index == max_size)
		return -1;

	return index;
}

/* Drop one reference on stream filter @index; on the last reference,
 * disable it in hardware and free the software state and bitmap slot.
 */
static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index)
{
	struct enetc_psfp_filter *sfi;
	u8 z;

	sfi = enetc_get_filter_by_index(index);
	WARN_ON(!sfi);
	z = refcount_dec_and_test(&sfi->refcount);

	if (z) {
		enetc_streamfilter_hw_set(priv, sfi, false);
		hlist_del(&sfi->node);
		kfree(sfi);
		clear_bit(index, epsfp.psfp_sfi_bitmap);
	}
}

/* Drop one reference on stream gate @index; free on last reference */
static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index)
{
	struct enetc_psfp_gate *sgi;
	u8 z;

	sgi = enetc_get_gate_by_index(index);
	WARN_ON(!sgi);
	z = refcount_dec_and_test(&sgi->refcount);
	if (z) {
		enetc_streamgate_hw_set(priv, sgi, false);
		hlist_del(&sgi->node);
		kfree(sgi);
	}
}

/* Drop one reference on flow meter @index; free on last reference */
static void flow_meter_unref(struct enetc_ndev_priv *priv, u32 index)
{
	struct enetc_psfp_meter *fmi;
	u8 z;

	fmi = enetc_get_meter_by_index(index);
	WARN_ON(!fmi);
	z = refcount_dec_and_test(&fmi->refcount);
	if (z) {
		enetc_flowmeter_hw_set(priv, fmi, false);
		hlist_del(&fmi->node);
		kfree(fmi);
	}
}

/* Tear down one offloaded rule: release its meter (if any), gate and
 * filter references, then free the rule itself.
 */
static void remove_one_chain(struct enetc_ndev_priv *priv,
			     struct enetc_stream_filter *filter)
{
	if (filter->flags & ENETC_PSFP_FLAGS_FMI)
		flow_meter_unref(priv, filter->fmi_index);

	stream_gate_unref(priv, filter->sgi_index);
	stream_filter_unref(priv, filter->sfi_index);

	hlist_del(&filter->node);
	kfree(filter);
}

/* Program stream id, filter (optional), gate and meter (optional) into
 * hardware, unwinding everything already programmed on failure.
 */
static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv,
			     struct enetc_streamid *sid,
			     struct enetc_psfp_filter *sfi,
			     struct enetc_psfp_gate *sgi,
			     struct enetc_psfp_meter *fmi)
{
	int err;

	err = enetc_streamid_hw_set(priv, sid, true);
	if (err)
		return err;

	if (sfi) {
		err = enetc_streamfilter_hw_set(priv, sfi, true);
		if (err)
			goto revert_sid;
	}

	err = enetc_streamgate_hw_set(priv, sgi, true);
	if (err)
		goto revert_sfi;

	if (fmi) {
		err = enetc_flowmeter_hw_set(priv, fmi, true);
		if (err)
			goto revert_sgi;
	}

	return 0;

revert_sgi:
	enetc_streamgate_hw_set(priv, sgi, false);
revert_sfi:
	if (sfi)
		enetc_streamfilter_hw_set(priv, sfi, false);
revert_sid:
	enetc_streamid_hw_set(priv, sid, false);
	return err;
}

/* Match the rule's action set and dissector keys against the supported
 * combinations table; NULL when unsupported.
 */
static struct actions_fwd *enetc_check_flow_actions(u64 acts,
						    unsigned int inputkeys)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++)
		if (acts == enetc_act_fwd[i].actions &&
		    inputkeys & enetc_act_fwd[i].keys)
			return &enetc_act_fwd[i];

	return NULL;
}

/* Reject police action parameters the hardware cannot offload */
static int enetc_psfp_policer_validate(const struct flow_action *action,
				       const struct flow_action_entry *act,
				       struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
!flow_action_is_last_entry(action, act)) { 1042 NL_SET_ERR_MSG_MOD(extack, 1043 "Offload not supported when conform action is ok, but action is not last"); 1044 return -EOPNOTSUPP; 1045 } 1046 1047 if (act->police.peakrate_bytes_ps || 1048 act->police.avrate || act->police.overhead) { 1049 NL_SET_ERR_MSG_MOD(extack, 1050 "Offload not supported when peakrate/avrate/overhead is configured"); 1051 return -EOPNOTSUPP; 1052 } 1053 1054 if (act->police.rate_pkt_ps) { 1055 NL_SET_ERR_MSG_MOD(extack, 1056 "QoS offload not support packets per second"); 1057 return -EOPNOTSUPP; 1058 } 1059 1060 return 0; 1061 } 1062 1063 static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, 1064 struct flow_cls_offload *f) 1065 { 1066 struct flow_action_entry *entryg = NULL, *entryp = NULL; 1067 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 1068 struct netlink_ext_ack *extack = f->common.extack; 1069 struct enetc_stream_filter *filter, *old_filter; 1070 struct enetc_psfp_meter *fmi = NULL, *old_fmi; 1071 struct enetc_psfp_filter *sfi, *old_sfi; 1072 struct enetc_psfp_gate *sgi, *old_sgi; 1073 struct flow_action_entry *entry; 1074 struct action_gate_entry *e; 1075 u8 sfi_overwrite = 0; 1076 int entries_size; 1077 int i, err; 1078 1079 if (f->common.chain_index >= priv->psfp_cap.max_streamid) { 1080 NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!"); 1081 return -ENOSPC; 1082 } 1083 1084 flow_action_for_each(i, entry, &rule->action) 1085 if (entry->id == FLOW_ACTION_GATE) 1086 entryg = entry; 1087 else if (entry->id == FLOW_ACTION_POLICE) 1088 entryp = entry; 1089 1090 /* Not support without gate action */ 1091 if (!entryg) 1092 return -EINVAL; 1093 1094 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 1095 if (!filter) 1096 return -ENOMEM; 1097 1098 filter->sid.index = f->common.chain_index; 1099 1100 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 1101 struct flow_match_eth_addrs match; 1102 1103 flow_rule_match_eth_addrs(rule, &match); 1104 1105 
if (!is_zero_ether_addr(match.mask->dst) && 1106 !is_zero_ether_addr(match.mask->src)) { 1107 NL_SET_ERR_MSG_MOD(extack, 1108 "Cannot match on both source and destination MAC"); 1109 err = -EINVAL; 1110 goto free_filter; 1111 } 1112 1113 if (!is_zero_ether_addr(match.mask->dst)) { 1114 if (!is_broadcast_ether_addr(match.mask->dst)) { 1115 NL_SET_ERR_MSG_MOD(extack, 1116 "Masked matching on destination MAC not supported"); 1117 err = -EINVAL; 1118 goto free_filter; 1119 } 1120 ether_addr_copy(filter->sid.dst_mac, match.key->dst); 1121 filter->sid.filtertype = STREAMID_TYPE_NULL; 1122 } 1123 1124 if (!is_zero_ether_addr(match.mask->src)) { 1125 if (!is_broadcast_ether_addr(match.mask->src)) { 1126 NL_SET_ERR_MSG_MOD(extack, 1127 "Masked matching on source MAC not supported"); 1128 err = -EINVAL; 1129 goto free_filter; 1130 } 1131 ether_addr_copy(filter->sid.src_mac, match.key->src); 1132 filter->sid.filtertype = STREAMID_TYPE_SMAC; 1133 } 1134 } else { 1135 NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS"); 1136 err = -EINVAL; 1137 goto free_filter; 1138 } 1139 1140 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 1141 struct flow_match_vlan match; 1142 1143 flow_rule_match_vlan(rule, &match); 1144 if (match.mask->vlan_priority) { 1145 if (match.mask->vlan_priority != 1146 (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) { 1147 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); 1148 err = -EINVAL; 1149 goto free_filter; 1150 } 1151 } 1152 1153 if (match.mask->vlan_id) { 1154 if (match.mask->vlan_id != VLAN_VID_MASK) { 1155 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id"); 1156 err = -EINVAL; 1157 goto free_filter; 1158 } 1159 1160 filter->sid.vid = match.key->vlan_id; 1161 if (!filter->sid.vid) 1162 filter->sid.tagged = STREAMID_VLAN_UNTAGGED; 1163 else 1164 filter->sid.tagged = STREAMID_VLAN_TAGGED; 1165 } 1166 } else { 1167 filter->sid.tagged = STREAMID_VLAN_ALL; 1168 } 1169 1170 /* parsing gate action */ 
1171 if (entryg->hw_index >= priv->psfp_cap.max_psfp_gate) { 1172 NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!"); 1173 err = -ENOSPC; 1174 goto free_filter; 1175 } 1176 1177 if (entryg->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) { 1178 NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!"); 1179 err = -ENOSPC; 1180 goto free_filter; 1181 } 1182 1183 entries_size = struct_size(sgi, entries, entryg->gate.num_entries); 1184 sgi = kzalloc(entries_size, GFP_KERNEL); 1185 if (!sgi) { 1186 err = -ENOMEM; 1187 goto free_filter; 1188 } 1189 1190 refcount_set(&sgi->refcount, 1); 1191 sgi->index = entryg->hw_index; 1192 sgi->init_ipv = entryg->gate.prio; 1193 sgi->basetime = entryg->gate.basetime; 1194 sgi->cycletime = entryg->gate.cycletime; 1195 sgi->num_entries = entryg->gate.num_entries; 1196 1197 e = sgi->entries; 1198 for (i = 0; i < entryg->gate.num_entries; i++) { 1199 e[i].gate_state = entryg->gate.entries[i].gate_state; 1200 e[i].interval = entryg->gate.entries[i].interval; 1201 e[i].ipv = entryg->gate.entries[i].ipv; 1202 e[i].maxoctets = entryg->gate.entries[i].maxoctets; 1203 } 1204 1205 filter->sgi_index = sgi->index; 1206 1207 sfi = kzalloc(sizeof(*sfi), GFP_KERNEL); 1208 if (!sfi) { 1209 err = -ENOMEM; 1210 goto free_gate; 1211 } 1212 1213 refcount_set(&sfi->refcount, 1); 1214 sfi->gate_id = sgi->index; 1215 sfi->meter_id = ENETC_PSFP_WILDCARD; 1216 1217 /* Flow meter and max frame size */ 1218 if (entryp) { 1219 err = enetc_psfp_policer_validate(&rule->action, entryp, extack); 1220 if (err) 1221 goto free_sfi; 1222 1223 if (entryp->police.burst) { 1224 fmi = kzalloc(sizeof(*fmi), GFP_KERNEL); 1225 if (!fmi) { 1226 err = -ENOMEM; 1227 goto free_sfi; 1228 } 1229 refcount_set(&fmi->refcount, 1); 1230 fmi->cir = entryp->police.rate_bytes_ps; 1231 fmi->cbs = entryp->police.burst; 1232 fmi->index = entryp->hw_index; 1233 filter->flags |= ENETC_PSFP_FLAGS_FMI; 1234 filter->fmi_index = fmi->index; 1235 sfi->meter_id = fmi->index; 1236 } 1237 1238 
if (entryp->police.mtu) 1239 sfi->maxsdu = entryp->police.mtu; 1240 } 1241 1242 /* prio ref the filter prio */ 1243 if (f->common.prio && f->common.prio <= BIT(3)) 1244 sfi->prio = f->common.prio - 1; 1245 else 1246 sfi->prio = ENETC_PSFP_WILDCARD; 1247 1248 old_sfi = enetc_psfp_check_sfi(sfi); 1249 if (!old_sfi) { 1250 int index; 1251 1252 index = enetc_get_free_index(priv); 1253 if (sfi->handle < 0) { 1254 NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!"); 1255 err = -ENOSPC; 1256 goto free_fmi; 1257 } 1258 1259 sfi->index = index; 1260 sfi->handle = index + HANDLE_OFFSET; 1261 /* Update the stream filter handle also */ 1262 filter->sid.handle = sfi->handle; 1263 filter->sfi_index = sfi->index; 1264 sfi_overwrite = 0; 1265 } else { 1266 filter->sfi_index = old_sfi->index; 1267 filter->sid.handle = old_sfi->handle; 1268 sfi_overwrite = 1; 1269 } 1270 1271 err = enetc_psfp_hw_set(priv, &filter->sid, 1272 sfi_overwrite ? NULL : sfi, sgi, fmi); 1273 if (err) 1274 goto free_fmi; 1275 1276 spin_lock(&epsfp.psfp_lock); 1277 if (filter->flags & ENETC_PSFP_FLAGS_FMI) { 1278 old_fmi = enetc_get_meter_by_index(filter->fmi_index); 1279 if (old_fmi) { 1280 fmi->refcount = old_fmi->refcount; 1281 refcount_set(&fmi->refcount, 1282 refcount_read(&old_fmi->refcount) + 1); 1283 hlist_del(&old_fmi->node); 1284 kfree(old_fmi); 1285 } 1286 hlist_add_head(&fmi->node, &epsfp.psfp_meter_list); 1287 } 1288 1289 /* Remove the old node if exist and update with a new node */ 1290 old_sgi = enetc_get_gate_by_index(filter->sgi_index); 1291 if (old_sgi) { 1292 refcount_set(&sgi->refcount, 1293 refcount_read(&old_sgi->refcount) + 1); 1294 hlist_del(&old_sgi->node); 1295 kfree(old_sgi); 1296 } 1297 1298 hlist_add_head(&sgi->node, &epsfp.psfp_gate_list); 1299 1300 if (!old_sfi) { 1301 hlist_add_head(&sfi->node, &epsfp.psfp_filter_list); 1302 set_bit(sfi->index, epsfp.psfp_sfi_bitmap); 1303 } else { 1304 kfree(sfi); 1305 refcount_inc(&old_sfi->refcount); 1306 } 1307 1308 old_filter = 
	enetc_get_stream_by_index(filter->sid.index);
	/* Replacing an existing stream on this chain: drop the old one */
	if (old_filter)
		remove_one_chain(priv, old_filter);

	filter->stats.lastused = jiffies;
	hlist_add_head(&filter->node, &epsfp.stream_list);

	spin_unlock(&epsfp.psfp_lock);

	return 0;

	/* Error unwind: free in reverse order of allocation; free(NULL) is a
	 * no-op for the members not yet allocated on a given path.
	 */
free_fmi:
	kfree(fmi);
free_sfi:
	kfree(sfi);
free_gate:
	kfree(sgi);
free_filter:
	kfree(filter);

	return err;
}

/* Validate the flower rule's action set against the driver's supported
 * combinations and dispatch to the PSFP parser. Returns 0 on success or a
 * negative errno, with an extack message explaining the rejection.
 */
static int enetc_config_clsflower(struct enetc_ndev_priv *priv,
				  struct flow_cls_offload *cls_flower)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
	struct netlink_ext_ack *extack = cls_flower->common.extack;
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_action *action = &rule->action;
	struct flow_action_entry *entry;
	struct actions_fwd *fwd;
	u64 actions = 0;
	int i, err;

	if (!flow_action_has_entries(action)) {
		NL_SET_ERR_MSG_MOD(extack, "At least one action is needed");
		return -EINVAL;
	}

	/* Collapse the action list into a bitmask of action IDs for lookup */
	flow_action_for_each(i, entry, action)
		actions |= BIT(entry->id);

	fwd = enetc_check_flow_actions(actions, dissector->used_keys);
	if (!fwd) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!");
		return -EOPNOTSUPP;
	}

	if (fwd->output & FILTER_ACTION_TYPE_PSFP) {
		err = enetc_psfp_parse_clsflower(priv, cls_flower);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs");
			return err;
		}
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported actions");
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Tear down the PSFP stream filter bound to this rule's chain index:
 * disable the stream ID in hardware, then release the software state.
 */
static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv,
					struct flow_cls_offload *f)
{
	struct enetc_stream_filter *filter;
	struct netlink_ext_ack *extack = f->common.extack;
	int err;

	if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
		NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
		return -ENOSPC;
	}

	filter = enetc_get_stream_by_index(f->common.chain_index);
	if (!filter)
		return -EINVAL;

	/* Disable (enable=false) the stream identification entry in HW */
	err = enetc_streamid_hw_set(priv, &filter->sid, false);
	if (err)
		return err;

	remove_one_chain(priv, filter);

	return 0;
}

/* Thin dispatcher; PSFP is currently the only destroy path */
static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv,
				   struct flow_cls_offload *f)
{
	return enetc_psfp_destroy_clsflower(priv, f);
}

/* Report delta statistics for the stream filter on this chain. Hardware
 * counters are cumulative, so the previously reported totals cached in
 * filter->stats are subtracted to produce the per-query deltas expected
 * by flow_stats_update() (byte count is reported as 0).
 */
static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv,
				struct flow_cls_offload *f)
{
	struct psfp_streamfilter_counters counters = {};
	struct enetc_stream_filter *filter;
	struct flow_stats stats = {};
	int err;

	filter = enetc_get_stream_by_index(f->common.chain_index);
	if (!filter)
		return -EINVAL;

	err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters);
	if (err)
		return -EINVAL;

	/* psfp_lock guards filter->stats against concurrent updates */
	spin_lock(&epsfp.psfp_lock);
	stats.pkts = counters.matching_frames_count +
		     counters.not_passing_sdu_count -
		     filter->stats.pkts;
	stats.drops = counters.not_passing_frames_count +
		      counters.not_passing_sdu_count +
		      counters.red_frames_count -
		      filter->stats.drops;
	stats.lastused = filter->stats.lastused;
	filter->stats.pkts += stats.pkts;
	filter->stats.drops += stats.drops;
	spin_unlock(&epsfp.psfp_lock);

	flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops,
			  stats.lastused, FLOW_ACTION_HW_STATS_DELAYED);

	return 0;
}

/* Entry point for flower classifier offload commands */
static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv,
				     struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return enetc_config_clsflower(priv, cls_flower);
	case FLOW_CLS_DESTROY:
		return enetc_destroy_clsflower(priv, cls_flower);
	case FLOW_CLS_STATS:
		return enetc_psfp_get_stats(priv,
					    cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

/* Release the stream filter index bitmap shared by all ports */
static inline void clean_psfp_sfi_bitmap(void)
{
	bitmap_free(epsfp.psfp_sfi_bitmap);
	epsfp.psfp_sfi_bitmap = NULL;
}

/* Free every node on the global stream (stream identification) list */
static void clean_stream_list(void)
{
	struct enetc_stream_filter *s;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) {
		hlist_del(&s->node);
		kfree(s);
	}
}

/* Free every node on the global stream filter list */
static void clean_sfi_list(void)
{
	struct enetc_psfp_filter *sfi;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) {
		hlist_del(&sfi->node);
		kfree(sfi);
	}
}

/* Free every node on the global stream gate list */
static void clean_sgi_list(void)
{
	struct enetc_psfp_gate *sgi;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) {
		hlist_del(&sgi->node);
		kfree(sgi);
	}
}

/* Drop all PSFP software state: lists, port bitmap and SFI index bitmap */
static void clean_psfp_all(void)
{
	/* Disable all list nodes and free all memory */
	clean_sfi_list();
	clean_sgi_list();
	clean_stream_list();
	epsfp.dev_bitmap = 0;
	clean_psfp_sfi_bitmap();
}

/* flow_block callback registered via flow_block_cb_setup_simple();
 * cb_priv is the net_device the block was bound to.
 */
int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	struct net_device *ndev = cb_priv;

	if (!tc_can_offload(ndev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* Enable or disable PSFP offload for this netdev, keeping the
 * ENETC_F_QCI flag in active_offloads in sync with the hardware state.
 * Returns 0 or a negative errno from the enable/disable helpers.
 */
int enetc_set_psfp(struct net_device *ndev, bool en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int err;

	if (en) {
		err = enetc_psfp_enable(priv);
		if (err)
			return err;

		priv->active_offloads |= ENETC_F_QCI;
		return 0;
	}

	err = enetc_psfp_disable(priv);
	if (err)
		return err;

	priv->active_offloads &= ~ENETC_F_QCI;

	return 0;
}

/* One-time initialization of the global PSFP state (epsfp); idempotent —
 * the allocated SFI bitmap doubles as the "already initialized" marker.
 */
int enetc_psfp_init(struct enetc_ndev_priv *priv)
{
	if (epsfp.psfp_sfi_bitmap)
		return 0;

	epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter,
					      GFP_KERNEL);
	if (!epsfp.psfp_sfi_bitmap)
		return -ENOMEM;

	spin_lock_init(&epsfp.psfp_lock);

	if (list_empty(&enetc_block_cb_list))
		epsfp.dev_bitmap = 0;

	return 0;
}

/* Tear down all PSFP state; refuses while any flow block is still bound */
int enetc_psfp_clean(struct enetc_ndev_priv *priv)
{
	if (!list_empty(&enetc_block_cb_list))
		return -EBUSY;

	clean_psfp_all();

	return 0;
}

/* Bind/unbind a tc flow block for PSFP. Tracks bound ports in
 * epsfp.dev_bitmap and frees all global PSFP state once the last
 * port unbinds.
 */
int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct flow_block_offload *f = type_data;
	int port, err;

	err = flow_block_cb_setup_simple(f, &enetc_block_cb_list,
					 enetc_setup_tc_block_cb,
					 ndev, ndev, true);
	if (err)
		return err;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		port = enetc_pf_to_port(priv->si->pdev);
		if (port < 0)
			return -EINVAL;

		set_bit(port, &epsfp.dev_bitmap);
		break;
	case FLOW_BLOCK_UNBIND:
		port = enetc_pf_to_port(priv->si->pdev);
		if (port < 0)
			return -EINVAL;

		clear_bit(port, &epsfp.dev_bitmap);
		/* Last bound port gone: release all shared PSFP state */
		if (!epsfp.dev_bitmap)
			clean_psfp_all();
		break;
	}

	return 0;
}

/* Report qdisc offload capabilities; taprio queue max-SDU is supported
 * when the station interface advertises Qbv in hw_features.
 */
int enetc_qos_query_caps(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_query_caps_base *base = type_data;
	struct enetc_si *si = priv->si;

	switch (base->type) {
	case TC_SETUP_QDISC_TAPRIO: {
		struct tc_taprio_caps *caps = base->caps;

		if (si->hw_features & ENETC_SI_F_QBV)
			caps->supports_queue_max_sdu = true;

		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}