// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2019 NXP */

#include "enetc.h"

#include <net/pkt_sched.h>
#include <linux/math64.h>
#include <linux/refcount.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gate.h>

static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
{
	return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET)
		& ENETC_QBV_MAX_GCL_LEN_MASK;
}

void enetc_sched_speed_set(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	u32 old_speed = priv->speed;
	u32 speed, pspeed;

	if (phydev->speed == old_speed)
		return;

	speed = phydev->speed;
	switch (speed) {
	case SPEED_1000:
		pspeed = ENETC_PMR_PSPEED_1000M;
		break;
	case SPEED_2500:
		pspeed = ENETC_PMR_PSPEED_2500M;
		break;
	case SPEED_100:
		pspeed = ENETC_PMR_PSPEED_100M;
		break;
	case SPEED_10:
	default:
		pspeed = ENETC_PMR_PSPEED_10M;
	}

	priv->speed = speed;
	enetc_port_wr(&priv->si->hw, ENETC_PMR,
		      (enetc_port_rd(&priv->si->hw, ENETC_PMR)
		      & (~ENETC_PMR_PSPEED_MASK))
		      | pspeed);
}

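/* The Qbv schedule is handed to the port as a gate control list (GCL)
 * over the control BD ring. Each GCL entry carries an 8-bit gate bitmap
 * and a duration: a gate mask of 0x81, for instance, keeps the gates of
 * traffic classes 7 and 0 open for that entry's interval while all the
 * other gates stay closed.
 */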
84 */ 85 gcl_config = &cbd.gcl_conf; 86 87 data_size = struct_size(gcl_data, entry, gcl_len); 88 gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); 89 if (!gcl_data) 90 return -ENOMEM; 91 92 gce = (struct gce *)(gcl_data + 1); 93 94 /* Set all gates open as default */ 95 gcl_config->atc = 0xff; 96 gcl_config->acl_len = cpu_to_le16(gcl_len); 97 98 if (!admin_conf->base_time) { 99 gcl_data->btl = 100 cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0)); 101 gcl_data->bth = 102 cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1)); 103 } else { 104 gcl_data->btl = 105 cpu_to_le32(lower_32_bits(admin_conf->base_time)); 106 gcl_data->bth = 107 cpu_to_le32(upper_32_bits(admin_conf->base_time)); 108 } 109 110 gcl_data->ct = cpu_to_le32(admin_conf->cycle_time); 111 gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension); 112 113 for (i = 0; i < gcl_len; i++) { 114 struct tc_taprio_sched_entry *temp_entry; 115 struct gce *temp_gce = gce + i; 116 117 temp_entry = &admin_conf->entries[i]; 118 119 temp_gce->gate = (u8)temp_entry->gate_mask; 120 temp_gce->period = cpu_to_le32(temp_entry->interval); 121 } 122 123 cbd.length = cpu_to_le16(data_size); 124 cbd.status_flags = 0; 125 126 dma = dma_map_single(&priv->si->pdev->dev, gcl_data, 127 data_size, DMA_TO_DEVICE); 128 if (dma_mapping_error(&priv->si->pdev->dev, dma)) { 129 netdev_err(priv->si->ndev, "DMA mapping failed!\n"); 130 kfree(gcl_data); 131 return -ENOMEM; 132 } 133 134 cbd.addr[0] = lower_32_bits(dma); 135 cbd.addr[1] = upper_32_bits(dma); 136 cbd.cls = BDCR_CMD_PORT_GCL; 137 cbd.status_flags = 0; 138 139 enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET, 140 tge | ENETC_QBV_TGE); 141 142 err = enetc_send_cmd(priv->si, &cbd); 143 if (err) 144 enetc_wr(&priv->si->hw, 145 ENETC_QBV_PTGCR_OFFSET, 146 tge & (~ENETC_QBV_TGE)); 147 148 dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE); 149 kfree(gcl_data); 150 151 return err; 152 } 153 154 int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data) 155 { 156 struct tc_taprio_qopt_offload *taprio = type_data; 157 struct enetc_ndev_priv *priv = netdev_priv(ndev); 158 int err; 159 int i; 160 161 /* TSD and Qbv are mutually exclusive in hardware */ 162 for (i = 0; i < priv->num_tx_rings; i++) 163 if (priv->tx_ring[i]->tsd_enable) 164 return -EBUSY; 165 166 for (i = 0; i < priv->num_tx_rings; i++) 167 enetc_set_bdr_prio(&priv->si->hw, 168 priv->tx_ring[i]->index, 169 taprio->enable ? i : 0); 170 171 err = enetc_setup_taprio(ndev, taprio); 172 173 if (err) 174 for (i = 0; i < priv->num_tx_rings; i++) 175 enetc_set_bdr_prio(&priv->si->hw, 176 priv->tx_ring[i]->index, 177 taprio->enable ? 
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
{
	struct tc_taprio_qopt_offload *taprio = type_data;
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int err;
	int i;

	/* TSD and Qbv are mutually exclusive in hardware */
	for (i = 0; i < priv->num_tx_rings; i++)
		if (priv->tx_ring[i]->tsd_enable)
			return -EBUSY;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_set_bdr_prio(&priv->si->hw,
				   priv->tx_ring[i]->index,
				   taprio->enable ? i : 0);

	err = enetc_setup_taprio(ndev, taprio);

	if (err)
		for (i = 0; i < priv->num_tx_rings; i++)
			enetc_set_bdr_prio(&priv->si->hw,
					   priv->tx_ring[i]->index,
					   taprio->enable ? 0 : i);

	return err;
}

static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc)
{
	return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE;
}

static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc)
{
	return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK;
}

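/* Illustrative credit-based shaper request for this handler, reserving
 * 10% of a 1 Gbps link for the highest priority TC; idleslope minus
 * sendslope must equal the port rate in kbit/s:
 *
 *	tc qdisc replace dev eth0 parent 100:8 cbs \
 *		idleslope 100000 sendslope -900000 \
 *		hicredit 30 locredit -1470 offload 1
 */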
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_cbs_qopt_offload *cbs = type_data;
	u32 port_transmit_rate = priv->speed;
	u8 tc_nums = netdev_get_num_tc(ndev);
	struct enetc_si *si = priv->si;
	u32 hi_credit_bit, hi_credit_reg;
	u32 max_interference_size;
	u32 port_frame_max_size;
	u8 tc = cbs->queue;
	u8 prio_top, prio_next;
	int bw_sum = 0;
	u8 bw;

	prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1);
	prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2);

	/* CBS mode is supported only on the two highest priority TCs */
	if (tc != prio_top && tc != prio_next)
		return -EOPNOTSUPP;

	if (!cbs->enable) {
		/* Make sure the other TCs that are numerically
		 * lower than this TC have been disabled.
		 */
		if (tc == prio_top &&
		    enetc_get_cbs_enable(&si->hw, prio_next)) {
			dev_err(&ndev->dev,
				"Disable TC%d before disabling TC%d\n",
				prio_next, tc);
			return -EINVAL;
		}

		enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0);
		enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0);

		return 0;
	}

	if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L ||
	    cbs->idleslope < 0 || cbs->sendslope > 0)
		return -EOPNOTSUPP;

	port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	bw = cbs->idleslope / (port_transmit_rate * 10UL);

	/* Make sure the other TCs that are numerically
	 * higher than this TC have been enabled.
	 */
	if (tc == prio_next) {
		if (!enetc_get_cbs_enable(&si->hw, prio_top)) {
			dev_err(&ndev->dev,
				"Enable TC%d before enabling TC%d\n",
				prio_top, prio_next);
			return -EINVAL;
		}
		bw_sum += enetc_get_cbs_bw(&si->hw, prio_top);
	}

	if (bw_sum + bw >= 100) {
		dev_err(&ndev->dev,
			"The sum of all CBS bandwidths can't exceed 100\n");
		return -EINVAL;
	}

	enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));

	/* For the top prio TC, the max_interference_size is maxSizedFrame.
	 *
	 * For the next prio TC, the max_interference_size is calculated
	 * as below:
	 *
	 *      max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra)
	 *
	 *	- Ra: idleSlope for AVB Class A
	 *	- R0: port transmit rate
	 *	- M0: maximum sized frame for the port
	 *	- Ma: maximum sized frame for AVB Class A
	 */

	if (tc == prio_top) {
		max_interference_size = port_frame_max_size * 8;
	} else {
		u32 m0, ma, r0, ra;

		m0 = port_frame_max_size * 8;
		ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8;
		ra = enetc_get_cbs_bw(&si->hw, prio_top) *
			port_transmit_rate * 10000ULL;
		r0 = port_transmit_rate * 1000000ULL;
		max_interference_size = m0 + ma +
			(u32)div_u64((u64)ra * m0, r0 - ra);
	}

	/* hiCredit (in bits) is calculated as:
	 *
	 * maxInterferenceSize * (idleSlope / portTxRate)
	 */
	hi_credit_bit = max_interference_size * bw / 100;

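	/* Worked example (illustrative): on a 1 Gbps link with bw = 10 and
	 * a max_interference_size of 12176 bits (a 1522 byte frame, i.e.
	 * MTU 1500 plus VLAN header and FCS), the credit is
	 * 12176 * 10 / 100 = 1217 bits. The conversion below then rescales
	 * it by the number of enet clock cycles per transmitted bit,
	 * times 100.
	 */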
	/* The hiCredit register value is the hiCredit bits scaled by:
	 *
	 * (enetClockFrequency / portTransmitRate) * 100
	 */
	hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
				     port_transmit_rate * 1000000ULL);

	enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg);

	/* Set bw register and enable this traffic class */
	enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);

	return 0;
}

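/* Illustrative ETF request that enables time specific departure (TSD) on
 * a TX queue; the offload flag is what selects this hardware path:
 *
 *	tc qdisc replace dev eth0 parent 100:1 etf \
 *		clockid CLOCK_TAI delta 300000 offload
 */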
int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_etf_qopt_offload *qopt = type_data;
	u8 tc_nums = netdev_get_num_tc(ndev);
	int tc;

	if (!tc_nums)
		return -EOPNOTSUPP;

	tc = qopt->queue;

	if (tc < 0 || tc >= priv->num_tx_rings)
		return -EINVAL;

	/* TSD (TXSTART) and TX checksum offload cannot be enabled
	 * simultaneously.
	 */
	if (ndev->features & NETIF_F_CSUM_MASK)
		return -EBUSY;

	/* TSD and Qbv are mutually exclusive in hardware */
	if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
		return -EBUSY;

	priv->tx_ring[tc]->tsd_enable = qopt->enable;
	enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc),
		      qopt->enable ? ENETC_TSDE : 0);

	return 0;
}

enum streamid_type {
	STREAMID_TYPE_RESERVED = 0,
	STREAMID_TYPE_NULL,
	STREAMID_TYPE_SMAC,
};

enum streamid_vlan_tagged {
	STREAMID_VLAN_RESERVED = 0,
	STREAMID_VLAN_TAGGED,
	STREAMID_VLAN_UNTAGGED,
	STREAMID_VLAN_ALL,
};

#define ENETC_PSFP_WILDCARD -1
#define HANDLE_OFFSET 100

enum forward_type {
	FILTER_ACTION_TYPE_PSFP = BIT(0),
	FILTER_ACTION_TYPE_ACL = BIT(1),
	FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0),
};

/* Maps a set of input actions (and their required match keys) to the
 * filter type that can offload them.
 */
struct actions_fwd {
	u64 actions;
	u64 keys;	/* required match keys */
	enum forward_type output;
};

struct psfp_streamfilter_counters {
	u64 matching_frames_count;
	u64 passing_frames_count;
	u64 not_passing_frames_count;
	u64 passing_sdu_count;
	u64 not_passing_sdu_count;
	u64 red_frames_count;
};

struct enetc_streamid {
	u32 index;
	union {
		u8 src_mac[6];
		u8 dst_mac[6];
	};
	u8 filtertype;
	u16 vid;
	u8 tagged;
	s32 handle;
};

struct enetc_psfp_filter {
	u32 index;
	s32 handle;
	s8 prio;
	u32 maxsdu;
	u32 gate_id;
	s32 meter_id;
	refcount_t refcount;
	struct hlist_node node;
};

struct enetc_psfp_gate {
	u32 index;
	s8 init_ipv;
	u64 basetime;
	u64 cycletime;
	u64 cycletimext;
	u32 num_entries;
	refcount_t refcount;
	struct hlist_node node;
	struct action_gate_entry entries[];
};

/* Only green color frames are handled for now. Support for eir and ebs,
 * color blind mode, the couple flag, etc. can be added once the policing
 * action offloads more parameters.
 */
struct enetc_psfp_meter {
	u32 index;
	u32 cir;
	u32 cbs;
	refcount_t refcount;
	struct hlist_node node;
};

#define ENETC_PSFP_FLAGS_FMI BIT(0)

struct enetc_stream_filter {
	struct enetc_streamid sid;
	u32 sfi_index;
	u32 sgi_index;
	u32 flags;
	u32 fmi_index;
	struct flow_stats stats;
	struct hlist_node node;
};

struct enetc_psfp {
	unsigned long dev_bitmap;
	unsigned long *psfp_sfi_bitmap;
	struct hlist_head stream_list;
	struct hlist_head psfp_filter_list;
	struct hlist_head psfp_gate_list;
	struct hlist_head psfp_meter_list;
	spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */
};

static struct actions_fwd enetc_act_fwd[] = {
	{
		BIT(FLOW_ACTION_GATE),
		BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
		FILTER_ACTION_TYPE_PSFP
	},
	{
		BIT(FLOW_ACTION_POLICE) |
		BIT(FLOW_ACTION_GATE),
		BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
		FILTER_ACTION_TYPE_PSFP
	},
	/* example for ACL actions */
	{
		BIT(FLOW_ACTION_DROP),
		0,
		FILTER_ACTION_TYPE_ACL
	}
};

static struct enetc_psfp epsfp = {
	.psfp_sfi_bitmap = NULL,
};

static LIST_HEAD(enetc_block_cb_list);

static inline int enetc_get_port(struct enetc_ndev_priv *priv)
{
	return priv->si->pdev->devfn & 0x7;
}

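/* Stream identification as per IEEE 802.1CB: a Null stream identification
 * entry matches frames on destination MAC and VLAN, while a source MAC and
 * VLAN identification (SMAC) entry matches on the source address instead.
 * Matching frames are tagged internally with the stream handle that the
 * stream filter stage then keys on.
 */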
/* Stream Identity Entry Set Descriptor */
static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
				 struct enetc_streamid *sid,
				 u8 enable)
{
	struct enetc_cbd cbd = {.cmd = 0};
	struct streamid_data *si_data;
	struct streamid_conf *si_conf;
	u16 data_size;
	dma_addr_t dma;
	int err;

	if (sid->index >= priv->psfp_cap.max_streamid)
		return -EINVAL;

	if (sid->filtertype != STREAMID_TYPE_NULL &&
	    sid->filtertype != STREAMID_TYPE_SMAC)
		return -EOPNOTSUPP;

	/* Disable operation before enable */
	cbd.index = cpu_to_le16((u16)sid->index);
	cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
	cbd.status_flags = 0;

	data_size = sizeof(struct streamid_data);
	si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
	if (!si_data)
		return -ENOMEM;
	cbd.length = cpu_to_le16(data_size);

	dma = dma_map_single(&priv->si->pdev->dev, si_data,
			     data_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
		netdev_err(priv->si->ndev, "DMA mapping failed!\n");
		kfree(si_data);
		return -ENOMEM;
	}

	cbd.addr[0] = lower_32_bits(dma);
	cbd.addr[1] = upper_32_bits(dma);
	eth_broadcast_addr(si_data->dmac);
	si_data->vid_vidm_tg =
		cpu_to_le16(ENETC_CBDR_SID_VID_MASK
			    + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));

	si_conf = &cbd.sid_set;
	/* Only one port supported for one entry, set itself */
	si_conf->iports = 1 << enetc_get_port(priv);
	si_conf->id_type = 1;
	si_conf->oui[2] = 0x0;
	si_conf->oui[1] = 0x80;
	si_conf->oui[0] = 0xC2;

	err = enetc_send_cmd(priv->si, &cbd);
	if (err) {
		err = -EINVAL;
		goto out;
	}

	if (!enable)
		goto out;

	/* Enable the entry overwrite again in case the space was flushed
	 * by hardware.
	 */
	memset(&cbd, 0, sizeof(cbd));

	cbd.index = cpu_to_le16((u16)sid->index);
	cbd.cmd = 0;
	cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
	cbd.status_flags = 0;

	si_conf->en = 0x80;
	si_conf->stream_handle = cpu_to_le32(sid->handle);
	si_conf->iports = 1 << enetc_get_port(priv);
	si_conf->id_type = sid->filtertype;
	si_conf->oui[2] = 0x0;
	si_conf->oui[1] = 0x80;
	si_conf->oui[0] = 0xC2;

	memset(si_data, 0, data_size);

	cbd.length = cpu_to_le16(data_size);

	cbd.addr[0] = lower_32_bits(dma);
	cbd.addr[1] = upper_32_bits(dma);

	/* VIDM defaults to 1.
	 * VID Match. If set (b1) then the VID must match, otherwise
	 * any VID is considered a match. VIDM setting is only used
	 * when TG is set to b01.
	 */
	if (si_conf->id_type == STREAMID_TYPE_NULL) {
		ether_addr_copy(si_data->dmac, sid->dst_mac);
		si_data->vid_vidm_tg =
			cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) +
				    ((((u16)(sid->tagged) & 0x3) << 14)
				     | ENETC_CBDR_SID_VIDM));
	} else if (si_conf->id_type == STREAMID_TYPE_SMAC) {
		ether_addr_copy(si_data->smac, sid->src_mac);
		si_data->vid_vidm_tg =
			cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) +
				    ((((u16)(sid->tagged) & 0x3) << 14)
				     | ENETC_CBDR_SID_VIDM));
	}

	err = enetc_send_cmd(priv->si, &cbd);
out:
	dma_unmap_single(&priv->si->pdev->dev, dma, data_size,
			 DMA_FROM_DEVICE);
	kfree(si_data);

	return err;
}

/* Stream Filter Instance Set Descriptor */
static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
				     struct enetc_psfp_filter *sfi,
				     u8 enable)
{
	struct enetc_cbd cbd = {.cmd = 0};
	struct sfi_conf *sfi_config;

	cbd.index = cpu_to_le16(sfi->index);
	cbd.cls = BDCR_CMD_STREAM_FILTER;
	cbd.status_flags = 0x80;
	cbd.length = cpu_to_le16(1);

	sfi_config = &cbd.sfi_conf;
	if (!enable)
		goto exit;

	sfi_config->en = 0x80;

	if (sfi->handle >= 0) {
		sfi_config->stream_handle =
			cpu_to_le32(sfi->handle);
		sfi_config->sthm |= 0x80;
	}

	sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
	sfi_config->input_ports = 1 << enetc_get_port(priv);

	/* The priority value which may be matched against the
	 * frame's priority value to determine a match for this entry.
	 */
	if (sfi->prio >= 0)
		sfi_config->multi |= (sfi->prio & 0x7) | 0x8;

	/* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
	 * field as being either an MSDU value or an index into the Flow
	 * Meter Instance table.
	 */
	if (sfi->maxsdu) {
		sfi_config->msdu =
			cpu_to_le16(sfi->maxsdu);
		sfi_config->multi |= 0x40;
	}

	if (sfi->meter_id >= 0) {
		sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id);
		sfi_config->multi |= 0x80;
	}

exit:
	return enetc_send_cmd(priv->si, &cbd);
}

static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
				      u32 index,
				      struct psfp_streamfilter_counters *cnt)
{
	struct enetc_cbd cbd = { .cmd = 2 };
	struct sfi_counter_data *data_buf;
	dma_addr_t dma;
	u16 data_size;
	int err;

	cbd.index = cpu_to_le16((u16)index);
	cbd.cmd = 2;
	cbd.cls = BDCR_CMD_STREAM_FILTER;
	cbd.status_flags = 0;

	data_size = sizeof(struct sfi_counter_data);
	data_buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	dma = dma_map_single(&priv->si->pdev->dev, data_buf,
			     data_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
		netdev_err(priv->si->ndev, "DMA mapping failed!\n");
		err = -ENOMEM;
		goto exit;
	}
	cbd.addr[0] = lower_32_bits(dma);
	cbd.addr[1] = upper_32_bits(dma);

	cbd.length = cpu_to_le16(data_size);

	err = enetc_send_cmd(priv->si, &cbd);
	dma_unmap_single(&priv->si->pdev->dev, dma, data_size,
			 DMA_FROM_DEVICE);
	if (err)
		goto exit;

	cnt->matching_frames_count =
		((u64)le32_to_cpu(data_buf->matchh) << 32)
		+ le32_to_cpu(data_buf->matchl);

	cnt->not_passing_sdu_count =
		((u64)le32_to_cpu(data_buf->msdu_droph) << 32)
		+ le32_to_cpu(data_buf->msdu_dropl);

	cnt->passing_sdu_count = cnt->matching_frames_count
		- cnt->not_passing_sdu_count;

	cnt->not_passing_frames_count =
		((u64)le32_to_cpu(data_buf->stream_gate_droph) << 32)
		+ le32_to_cpu(data_buf->stream_gate_dropl);

	cnt->passing_frames_count = cnt->matching_frames_count
		- cnt->not_passing_sdu_count
		- cnt->not_passing_frames_count;

	cnt->red_frames_count =
		((u64)le32_to_cpu(data_buf->flow_meter_droph) << 32)
		+ le32_to_cpu(data_buf->flow_meter_dropl);

exit:
	kfree(data_buf);
	return err;
}

static u64 get_ptp_now(struct enetc_hw *hw)
{
	u64 now_lo, now_hi, now;

	now_lo = enetc_rd(hw, ENETC_SICTR0);
	now_hi = enetc_rd(hw, ENETC_SICTR1);
	now = now_lo | now_hi << 32;

	return now;
}

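/* Align the gate list base time to the start of the next full cycle.
 * Illustrative example: with now = 1000250000 ns and cycle = 100000 ns,
 * n = 10002 full cycles have elapsed, so the schedule is started at
 * (10002 + 1) * 100000 = 1000300000 ns.
 */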
static int get_start_ns(u64 now, u64 cycle, u64 *start)
{
	u64 n;

	if (!cycle)
		return -EFAULT;

	n = div64_u64(now, cycle);

	*start = (n + 1) * cycle;

	return 0;
}

/* Stream Gate Instance Set Descriptor */
static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
				   struct enetc_psfp_gate *sgi,
				   u8 enable)
{
	struct enetc_cbd cbd = { .cmd = 0 };
	struct sgi_table *sgi_config;
	struct sgcl_conf *sgcl_config;
	struct sgcl_data *sgcl_data;
	struct sgce *sgce;
	dma_addr_t dma;
	u16 data_size;
	int err, i;
	u64 now;

	cbd.index = cpu_to_le16(sgi->index);
	cbd.cmd = 0;
	cbd.cls = BDCR_CMD_STREAM_GCL;
	cbd.status_flags = 0x80;

	/* disable */
	if (!enable)
		return enetc_send_cmd(priv->si, &cbd);

	if (!sgi->num_entries)
		return 0;

	if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist ||
	    !sgi->cycletime)
		return -EINVAL;

	/* enable */
	sgi_config = &cbd.sgi_table;

	/* Keep open before gate list start */
	sgi_config->ocgtst = 0x80;

	sgi_config->oipv = (sgi->init_ipv < 0) ?
			0x0 : ((sgi->init_ipv & 0x7) | 0x8);

	sgi_config->en = 0x80;

	/* Basic config */
	err = enetc_send_cmd(priv->si, &cbd);
	if (err)
		return -EINVAL;

	memset(&cbd, 0, sizeof(cbd));

	cbd.index = cpu_to_le16(sgi->index);
	cbd.cmd = 1;
	cbd.cls = BDCR_CMD_STREAM_GCL;
	cbd.status_flags = 0;

	sgcl_config = &cbd.sgcl_conf;

	sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;

	data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);

	sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
	if (!sgcl_data)
		return -ENOMEM;

	cbd.length = cpu_to_le16(data_size);

	dma = dma_map_single(&priv->si->pdev->dev,
			     sgcl_data, data_size,
			     DMA_FROM_DEVICE);
	if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
		netdev_err(priv->si->ndev, "DMA mapping failed!\n");
		kfree(sgcl_data);
		return -ENOMEM;
	}

	cbd.addr[0] = lower_32_bits(dma);
	cbd.addr[1] = upper_32_bits(dma);

	sgce = &sgcl_data->sgcl[0];

	sgcl_config->agtst = 0x80;

	sgcl_data->ct = cpu_to_le32(sgi->cycletime);
	sgcl_data->cte = cpu_to_le32(sgi->cycletimext);

	if (sgi->init_ipv >= 0)
		sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8;

	for (i = 0; i < sgi->num_entries; i++) {
		struct action_gate_entry *from = &sgi->entries[i];
		struct sgce *to = &sgce[i];

		if (from->gate_state)
			to->multi |= 0x10;

		if (from->ipv >= 0)
			to->multi |= ((from->ipv & 0x7) << 5) | 0x08;

		if (from->maxoctets >= 0) {
			to->multi |= 0x01;
			to->msdu[0] = from->maxoctets & 0xFF;
			to->msdu[1] = (from->maxoctets >> 8) & 0xFF;
			to->msdu[2] = (from->maxoctets >> 16) & 0xFF;
		}

		to->interval = cpu_to_le32(from->interval);
	}

	/* If basetime is less than now, calculate start time */
	now = get_ptp_now(&priv->si->hw);

	if (sgi->basetime < now) {
		u64 start;

		err = get_start_ns(now, sgi->cycletime, &start);
		if (err)
			goto exit;
		sgcl_data->btl = cpu_to_le32(lower_32_bits(start));
		sgcl_data->bth = cpu_to_le32(upper_32_bits(start));
	} else {
		u32 hi, lo;

		hi = upper_32_bits(sgi->basetime);
		lo = lower_32_bits(sgi->basetime);
		sgcl_data->bth = cpu_to_le32(hi);
		sgcl_data->btl = cpu_to_le32(lo);
	}

	err = enetc_send_cmd(priv->si, &cbd);

exit:
	dma_unmap_single(&priv->si->pdev->dev, dma, data_size,
			 DMA_FROM_DEVICE);
	kfree(sgcl_data);

	return err;
}

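/* Convert the policer rate to the flow meter CIR register units before
 * programming it. By the arithmetic below, one register unit corresponds
 * to 3.725 bit/s (rate_bytes_ps * 8000 / 3725 = rate_bits_ps / 3.725);
 * the authoritative unit definition is in the ENETC reference manual.
 * Illustrative example: a 1 Gbps policer (rate_bytes_ps = 125000000)
 * programs 125000000 * 8000 / 3725 = 268456375.
 */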
static int enetc_flowmeter_hw_set(struct enetc_ndev_priv *priv,
				  struct enetc_psfp_meter *fmi,
				  u8 enable)
{
	struct enetc_cbd cbd = { .cmd = 0 };
	struct fmi_conf *fmi_config;
	u64 temp = 0;

	cbd.index = cpu_to_le16((u16)fmi->index);
	cbd.cls = BDCR_CMD_FLOW_METER;
	cbd.status_flags = 0x80;

	if (!enable)
		return enetc_send_cmd(priv->si, &cbd);

	fmi_config = &cbd.fmi_conf;
	fmi_config->en = 0x80;

	if (fmi->cir) {
		temp = (u64)8000 * fmi->cir;
		temp = div_u64(temp, 3725);
	}

	fmi_config->cir = cpu_to_le32((u32)temp);
	fmi_config->cbs = cpu_to_le32(fmi->cbs);

	/* eir and ebs are disabled by default */
	fmi_config->eir = 0;
	fmi_config->ebs = 0;

	/* Defaults:
	 * mark red disabled
	 * drop on yellow disabled
	 * color mode disabled
	 * couple flag disabled
	 */
	fmi_config->conf = 0;

	return enetc_send_cmd(priv->si, &cbd);
}

static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index)
{
	struct enetc_stream_filter *f;

	hlist_for_each_entry(f, &epsfp.stream_list, node)
		if (f->sid.index == index)
			return f;

	return NULL;
}

static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index)
{
	struct enetc_psfp_gate *g;

	hlist_for_each_entry(g, &epsfp.psfp_gate_list, node)
		if (g->index == index)
			return g;

	return NULL;
}

static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index)
{
	struct enetc_psfp_filter *s;

	hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
		if (s->index == index)
			return s;

	return NULL;
}

static struct enetc_psfp_meter *enetc_get_meter_by_index(u32 index)
{
	struct enetc_psfp_meter *m;

	hlist_for_each_entry(m, &epsfp.psfp_meter_list, node)
		if (m->index == index)
			return m;

	return NULL;
}

static struct enetc_psfp_filter
	*enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi)
{
	struct enetc_psfp_filter *s;

	hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
		if (s->gate_id == sfi->gate_id &&
		    s->prio == sfi->prio &&
		    s->maxsdu == sfi->maxsdu &&
		    s->meter_id == sfi->meter_id)
			return s;

	return NULL;
}

static int enetc_get_free_index(struct enetc_ndev_priv *priv)
{
	u32 max_size = priv->psfp_cap.max_psfp_filter;
	unsigned long index;

	index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size);
	if (index == max_size)
		return -1;

	return index;
}

static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index)
{
	struct enetc_psfp_filter *sfi;
	u8 z;

	sfi = enetc_get_filter_by_index(index);
	if (WARN_ON(!sfi))
		return;

	z = refcount_dec_and_test(&sfi->refcount);
	if (z) {
		enetc_streamfilter_hw_set(priv, sfi, false);
		hlist_del(&sfi->node);
		kfree(sfi);
		clear_bit(index, epsfp.psfp_sfi_bitmap);
	}
}

static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index)
{
	struct enetc_psfp_gate *sgi;
	u8 z;

	sgi = enetc_get_gate_by_index(index);
	if (WARN_ON(!sgi))
		return;

	z = refcount_dec_and_test(&sgi->refcount);
	if (z) {
		enetc_streamgate_hw_set(priv, sgi, false);
		hlist_del(&sgi->node);
		kfree(sgi);
	}
}

static void flow_meter_unref(struct enetc_ndev_priv *priv, u32 index)
{
	struct enetc_psfp_meter *fmi;
	u8 z;

	fmi = enetc_get_meter_by_index(index);
	if (WARN_ON(!fmi))
		return;

	z = refcount_dec_and_test(&fmi->refcount);
	if (z) {
		enetc_flowmeter_hw_set(priv, fmi, false);
		hlist_del(&fmi->node);
		kfree(fmi);
	}
}

static void remove_one_chain(struct enetc_ndev_priv *priv,
			     struct enetc_stream_filter *filter)
{
	if (filter->flags & ENETC_PSFP_FLAGS_FMI)
		flow_meter_unref(priv, filter->fmi_index);

	stream_gate_unref(priv, filter->sgi_index);
	stream_filter_unref(priv, filter->sfi_index);

	hlist_del(&filter->node);
	kfree(filter);
}

static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv,
			     struct enetc_streamid *sid,
			     struct enetc_psfp_filter *sfi,
			     struct enetc_psfp_gate *sgi,
			     struct enetc_psfp_meter *fmi)
{
	int err;

	err = enetc_streamid_hw_set(priv, sid, true);
	if (err)
		return err;

	if (sfi) {
		err = enetc_streamfilter_hw_set(priv, sfi, true);
		if (err)
			goto revert_sid;
	}

	err = enetc_streamgate_hw_set(priv, sgi, true);
	if (err)
		goto revert_sfi;

	if (fmi) {
		err = enetc_flowmeter_hw_set(priv, fmi, true);
		if (err)
			goto revert_sgi;
	}

	return 0;

revert_sgi:
	enetc_streamgate_hw_set(priv, sgi, false);
revert_sfi:
	if (sfi)
		enetc_streamfilter_hw_set(priv, sfi, false);
revert_sid:
	enetc_streamid_hw_set(priv, sid, false);
	return err;
}

static struct actions_fwd *enetc_check_flow_actions(u64 acts,
						    unsigned int inputkeys)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++)
		if (acts == enetc_act_fwd[i].actions &&
		    inputkeys & enetc_act_fwd[i].keys)
			return &enetc_act_fwd[i];

	return NULL;
}

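/* Illustrative tc flower rule with a gate action that this parser accepts;
 * the chain index selects the stream identity entry (police is optional):
 *
 *	tc filter add dev eth0 ingress chain 0 flower skip_sw \
 *		dst_mac 00:01:02:03:04:05 \
 *		action gate index 1 base-time 0 \
 *		sched-entry open 200000000 -1 -1 \
 *		sched-entry close 100000000 -1 -1
 */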
static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
				      struct flow_cls_offload *f)
{
	struct flow_action_entry *entryg = NULL, *entryp = NULL;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct enetc_stream_filter *filter, *old_filter;
	struct enetc_psfp_meter *fmi = NULL, *old_fmi;
	struct enetc_psfp_filter *sfi, *old_sfi;
	struct enetc_psfp_gate *sgi, *old_sgi;
	struct flow_action_entry *entry;
	struct action_gate_entry *e;
	u8 sfi_overwrite = 0;
	int entries_size;
	int i, err;

	if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
		NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
		return -ENOSPC;
	}

	flow_action_for_each(i, entry, &rule->action)
		if (entry->id == FLOW_ACTION_GATE)
			entryg = entry;
		else if (entry->id == FLOW_ACTION_POLICE)
			entryp = entry;

	/* Not supported without a gate action */
	if (!entryg)
		return -EINVAL;

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	filter->sid.index = f->common.chain_index;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!is_zero_ether_addr(match.mask->dst) &&
		    !is_zero_ether_addr(match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Cannot match on both source and destination MAC");
			err = -EINVAL;
			goto free_filter;
		}

		if (!is_zero_ether_addr(match.mask->dst)) {
			if (!is_broadcast_ether_addr(match.mask->dst)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Masked matching on destination MAC not supported");
				err = -EINVAL;
				goto free_filter;
			}
			ether_addr_copy(filter->sid.dst_mac, match.key->dst);
			filter->sid.filtertype = STREAMID_TYPE_NULL;
		}

		if (!is_zero_ether_addr(match.mask->src)) {
			if (!is_broadcast_ether_addr(match.mask->src)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Masked matching on source MAC not supported");
				err = -EINVAL;
				goto free_filter;
			}
			ether_addr_copy(filter->sid.src_mac, match.key->src);
			filter->sid.filtertype = STREAMID_TYPE_SMAC;
		}
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS");
		err = -EINVAL;
		goto free_filter;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (match.mask->vlan_priority) {
			if (match.mask->vlan_priority !=
			    (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) {
				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
				err = -EINVAL;
				goto free_filter;
			}
		}

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id != VLAN_VID_MASK) {
				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id");
				err = -EINVAL;
				goto free_filter;
			}

			filter->sid.vid = match.key->vlan_id;
			if (!filter->sid.vid)
				filter->sid.tagged = STREAMID_VLAN_UNTAGGED;
			else
				filter->sid.tagged = STREAMID_VLAN_TAGGED;
		}
	} else {
		filter->sid.tagged = STREAMID_VLAN_ALL;
	}

	/* parse the gate action */
	if (entryg->gate.index >= priv->psfp_cap.max_psfp_gate) {
		NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
		err = -ENOSPC;
		goto free_filter;
	}

	if (entryg->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) {
		NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
		err = -ENOSPC;
		goto free_filter;
	}

	entries_size = struct_size(sgi, entries, entryg->gate.num_entries);
	sgi = kzalloc(entries_size, GFP_KERNEL);
	if (!sgi) {
		err = -ENOMEM;
		goto free_filter;
	}

	refcount_set(&sgi->refcount, 1);
	sgi->index = entryg->gate.index;
	sgi->init_ipv = entryg->gate.prio;
	sgi->basetime = entryg->gate.basetime;
	sgi->cycletime = entryg->gate.cycletime;
	sgi->num_entries = entryg->gate.num_entries;

	e = sgi->entries;
	for (i = 0; i < entryg->gate.num_entries; i++) {
		e[i].gate_state = entryg->gate.entries[i].gate_state;
		e[i].interval = entryg->gate.entries[i].interval;
		e[i].ipv = entryg->gate.entries[i].ipv;
		e[i].maxoctets = entryg->gate.entries[i].maxoctets;
	}

	filter->sgi_index = sgi->index;

	sfi = kzalloc(sizeof(*sfi), GFP_KERNEL);
	if (!sfi) {
		err = -ENOMEM;
		goto free_gate;
	}

	refcount_set(&sfi->refcount, 1);
	sfi->gate_id = sgi->index;
	sfi->meter_id = ENETC_PSFP_WILDCARD;

	/* Flow meter and max frame size */
	if (entryp) {
		if (entryp->police.burst) {
			fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
			if (!fmi) {
				err = -ENOMEM;
				goto free_sfi;
			}
			refcount_set(&fmi->refcount, 1);
			fmi->cir = entryp->police.rate_bytes_ps;
			fmi->cbs = entryp->police.burst;
			fmi->index = entryp->police.index;
			filter->flags |= ENETC_PSFP_FLAGS_FMI;
			filter->fmi_index = fmi->index;
			sfi->meter_id = fmi->index;
		}

		if (entryp->police.mtu)
			sfi->maxsdu = entryp->police.mtu;
	}

	/* The stream filter prio mirrors the tc filter priority */
	if (f->common.prio && f->common.prio <= BIT(3))
		sfi->prio = f->common.prio - 1;
	else
		sfi->prio = ENETC_PSFP_WILDCARD;

	old_sfi = enetc_psfp_check_sfi(sfi);
	if (!old_sfi) {
		int index;

		index = enetc_get_free_index(priv);
		if (index < 0) {
			NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
			err = -ENOSPC;
			goto free_fmi;
		}

		sfi->index = index;
		sfi->handle = index + HANDLE_OFFSET;
		/* Update the stream filter handle also */
		filter->sid.handle = sfi->handle;
		filter->sfi_index = sfi->index;
		sfi_overwrite = 0;
	} else {
		filter->sfi_index = old_sfi->index;
		filter->sid.handle = old_sfi->handle;
		sfi_overwrite = 1;
	}

	err = enetc_psfp_hw_set(priv, &filter->sid,
				sfi_overwrite ? NULL : sfi, sgi, fmi);
	if (err)
		goto free_fmi;

	spin_lock(&epsfp.psfp_lock);
	if (filter->flags & ENETC_PSFP_FLAGS_FMI) {
		old_fmi = enetc_get_meter_by_index(filter->fmi_index);
		if (old_fmi) {
			refcount_set(&fmi->refcount,
				     refcount_read(&old_fmi->refcount) + 1);
			hlist_del(&old_fmi->node);
			kfree(old_fmi);
		}
		hlist_add_head(&fmi->node, &epsfp.psfp_meter_list);
	}

	/* Remove the old node if it exists and replace it with the new one */
	old_sgi = enetc_get_gate_by_index(filter->sgi_index);
	if (old_sgi) {
		refcount_set(&sgi->refcount,
			     refcount_read(&old_sgi->refcount) + 1);
		hlist_del(&old_sgi->node);
		kfree(old_sgi);
	}

	hlist_add_head(&sgi->node, &epsfp.psfp_gate_list);

	if (!old_sfi) {
		hlist_add_head(&sfi->node, &epsfp.psfp_filter_list);
		set_bit(sfi->index, epsfp.psfp_sfi_bitmap);
	} else {
		kfree(sfi);
		refcount_inc(&old_sfi->refcount);
	}

	old_filter = enetc_get_stream_by_index(filter->sid.index);
	if (old_filter)
		remove_one_chain(priv, old_filter);

	filter->stats.lastused = jiffies;
	hlist_add_head(&filter->node, &epsfp.stream_list);

	spin_unlock(&epsfp.psfp_lock);

	return 0;

free_fmi:
	kfree(fmi);
free_sfi:
	kfree(sfi);
free_gate:
	kfree(sgi);
free_filter:
	kfree(filter);

	return err;
}

static int enetc_config_clsflower(struct enetc_ndev_priv *priv,
				  struct flow_cls_offload *cls_flower)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
	struct netlink_ext_ack *extack = cls_flower->common.extack;
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_action *action = &rule->action;
	struct flow_action_entry *entry;
	struct actions_fwd *fwd;
	u64 actions = 0;
	int i, err;

	if (!flow_action_has_entries(action)) {
		NL_SET_ERR_MSG_MOD(extack, "At least one action is needed");
		return -EINVAL;
	}

	flow_action_for_each(i, entry, action)
		actions |= BIT(entry->id);

	fwd = enetc_check_flow_actions(actions, dissector->used_keys);
	if (!fwd) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!");
		return -EOPNOTSUPP;
	}

	if (fwd->output & FILTER_ACTION_TYPE_PSFP) {
		err = enetc_psfp_parse_clsflower(priv, cls_flower);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs");
			return err;
		}
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported actions");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv,
					struct flow_cls_offload *f)
{
	struct enetc_stream_filter *filter;
	struct netlink_ext_ack *extack = f->common.extack;
	int err;

	if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
		NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
		return -ENOSPC;
	}

	filter = enetc_get_stream_by_index(f->common.chain_index);
	if (!filter)
		return -EINVAL;

	err = enetc_streamid_hw_set(priv, &filter->sid, false);
	if (err)
		return err;

	remove_one_chain(priv, filter);

	return 0;
}

static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv,
				   struct flow_cls_offload *f)
{
	return enetc_psfp_destroy_clsflower(priv, f);
}

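/* The hardware counters are cumulative, but flow_stats_update() expects
 * deltas, so report only the increments since the previous query and fold
 * them back into the per-filter totals.
 */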
static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv,
				struct flow_cls_offload *f)
{
	struct psfp_streamfilter_counters counters = {};
	struct enetc_stream_filter *filter;
	struct flow_stats stats = {};
	int err;

	filter = enetc_get_stream_by_index(f->common.chain_index);
	if (!filter)
		return -EINVAL;

	err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters);
	if (err)
		return -EINVAL;

	spin_lock(&epsfp.psfp_lock);
	stats.pkts = counters.matching_frames_count +
		     counters.not_passing_sdu_count -
		     filter->stats.pkts;
	stats.drops = counters.not_passing_frames_count +
		      counters.not_passing_sdu_count +
		      counters.red_frames_count -
		      filter->stats.drops;
	stats.lastused = filter->stats.lastused;
	filter->stats.pkts += stats.pkts;
	filter->stats.drops += stats.drops;
	spin_unlock(&epsfp.psfp_lock);

	flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops,
			  stats.lastused, FLOW_ACTION_HW_STATS_DELAYED);

	return 0;
}

static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv,
				     struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return enetc_config_clsflower(priv, cls_flower);
	case FLOW_CLS_DESTROY:
		return enetc_destroy_clsflower(priv, cls_flower);
	case FLOW_CLS_STATS:
		return enetc_psfp_get_stats(priv, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static inline void clean_psfp_sfi_bitmap(void)
{
	bitmap_free(epsfp.psfp_sfi_bitmap);
	epsfp.psfp_sfi_bitmap = NULL;
}

static void clean_stream_list(void)
{
	struct enetc_stream_filter *s;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) {
		hlist_del(&s->node);
		kfree(s);
	}
}

static void clean_sfi_list(void)
{
	struct enetc_psfp_filter *sfi;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) {
		hlist_del(&sfi->node);
		kfree(sfi);
	}
}

static void clean_sgi_list(void)
{
	struct enetc_psfp_gate *sgi;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) {
		hlist_del(&sgi->node);
		kfree(sgi);
	}
}

static void clean_psfp_all(void)
{
	/* Disable all list nodes and free all memory */
	clean_sfi_list();
	clean_sgi_list();
	clean_stream_list();
	epsfp.dev_bitmap = 0;
	clean_psfp_sfi_bitmap();
}

int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	struct net_device *ndev = cb_priv;

	if (!tc_can_offload(ndev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data);
	default:
		return -EOPNOTSUPP;
	}
}

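/* epsfp is file-global state shared by every port this driver manages:
 * dev_bitmap tracks the ports with a bound flow block, the stream filter
 * bitmap is sized from the hardware capability, and all list updates are
 * serialized by psfp_lock.
 */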
int enetc_psfp_init(struct enetc_ndev_priv *priv)
{
	if (epsfp.psfp_sfi_bitmap)
		return 0;

	epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter,
					      GFP_KERNEL);
	if (!epsfp.psfp_sfi_bitmap)
		return -ENOMEM;

	spin_lock_init(&epsfp.psfp_lock);

	if (list_empty(&enetc_block_cb_list))
		epsfp.dev_bitmap = 0;

	return 0;
}

int enetc_psfp_clean(struct enetc_ndev_priv *priv)
{
	if (!list_empty(&enetc_block_cb_list))
		return -EBUSY;

	clean_psfp_all();

	return 0;
}

int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct flow_block_offload *f = type_data;
	int err;

	err = flow_block_cb_setup_simple(f, &enetc_block_cb_list,
					 enetc_setup_tc_block_cb,
					 ndev, ndev, true);
	if (err)
		return err;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		set_bit(enetc_get_port(priv), &epsfp.dev_bitmap);
		break;
	case FLOW_BLOCK_UNBIND:
		clear_bit(enetc_get_port(priv), &epsfp.dev_bitmap);
		if (!epsfp.dev_bitmap)
			clean_psfp_all();
		break;
	}

	return 0;
}