// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2019 NXP */

#include "enetc.h"

#include <net/pkt_sched.h>
#include <linux/math64.h>
#include <linux/refcount.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gate.h>

static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
{
	return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET)
		& ENETC_QBV_MAX_GCL_LEN_MASK;
}

void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
{
	u32 old_speed = priv->speed;
	u32 pspeed;

	if (speed == old_speed)
		return;

	switch (speed) {
	case SPEED_1000:
		pspeed = ENETC_PMR_PSPEED_1000M;
		break;
	case SPEED_2500:
		pspeed = ENETC_PMR_PSPEED_2500M;
		break;
	case SPEED_100:
		pspeed = ENETC_PMR_PSPEED_100M;
		break;
	case SPEED_10:
	default:
		pspeed = ENETC_PMR_PSPEED_10M;
	}

	priv->speed = speed;
	enetc_port_wr(&priv->si->hw, ENETC_PMR,
		      (enetc_port_rd(&priv->si->hw, ENETC_PMR)
		       & (~ENETC_PMR_PSPEED_MASK))
		      | pspeed);
}

static int enetc_setup_taprio(struct net_device *ndev,
			      struct tc_taprio_qopt_offload *admin_conf)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_cbd cbd = {.cmd = 0};
	struct tgs_gcl_conf *gcl_config;
	struct tgs_gcl_data *gcl_data;
	struct gce *gce;
	dma_addr_t dma;
	u16 data_size;
	u16 gcl_len;
	u32 tge;
	int err;
	int i;

	if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw))
		return -EINVAL;
	gcl_len = admin_conf->num_entries;

	tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
	if (!admin_conf->enable) {
		enetc_wr(&priv->si->hw,
			 ENETC_QBV_PTGCR_OFFSET,
			 tge & (~ENETC_QBV_TGE));
		return 0;
	}

	if (admin_conf->cycle_time > U32_MAX ||
	    admin_conf->cycle_time_extension > U32_MAX)
		return -EINVAL;

	/* Configure the (administrative) gate control list using the
	 * control BD descriptor.
	 */
	gcl_config = &cbd.gcl_conf;

	data_size = struct_size(gcl_data, entry, gcl_len);
	gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
	if (!gcl_data)
		return -ENOMEM;

	gce = (struct gce *)(gcl_data + 1);

	/* Set all gates open as default */
	gcl_config->atc = 0xff;
	gcl_config->acl_len = cpu_to_le16(gcl_len);

	if (!admin_conf->base_time) {
		gcl_data->btl =
			cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0));
		gcl_data->bth =
			cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1));
	} else {
		gcl_data->btl =
			cpu_to_le32(lower_32_bits(admin_conf->base_time));
		gcl_data->bth =
			cpu_to_le32(upper_32_bits(admin_conf->base_time));
	}

	gcl_data->ct = cpu_to_le32(admin_conf->cycle_time);
	gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension);

	for (i = 0; i < gcl_len; i++) {
		struct tc_taprio_sched_entry *temp_entry;
		struct gce *temp_gce = gce + i;

		temp_entry = &admin_conf->entries[i];

		temp_gce->gate = (u8)temp_entry->gate_mask;
		temp_gce->period = cpu_to_le32(temp_entry->interval);
	}

	cbd.length = cpu_to_le16(data_size);
	cbd.status_flags = 0;

	dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
			     data_size, DMA_TO_DEVICE);
	if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
		netdev_err(priv->si->ndev, "DMA mapping failed!\n");
		kfree(gcl_data);
		return -ENOMEM;
	}

	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
	cbd.cls = BDCR_CMD_PORT_GCL;
	cbd.status_flags = 0;

	enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
		 tge | ENETC_QBV_TGE);

	err = enetc_send_cmd(priv->si, &cbd);
	if (err)
		enetc_wr(&priv->si->hw,
			 ENETC_QBV_PTGCR_OFFSET,
			 tge & (~ENETC_QBV_TGE));

	dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE);
	kfree(gcl_data);

	return err;
}

int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
{
	struct tc_taprio_qopt_offload *taprio = type_data;
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int err;
	int i;

	/* TSD and Qbv are mutually exclusive in hardware */
	for (i = 0; i < priv->num_tx_rings; i++)
		if (priv->tx_ring[i]->tsd_enable)
			return -EBUSY;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_set_bdr_prio(&priv->si->hw,
				   priv->tx_ring[i]->index,
				   taprio->enable ? i : 0);

	err = enetc_setup_taprio(ndev, taprio);

	if (err)
		for (i = 0; i < priv->num_tx_rings; i++)
			enetc_set_bdr_prio(&priv->si->hw,
					   priv->tx_ring[i]->index,
					   taprio->enable ? 0 : i);

	return err;
}

static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc)
{
	return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE;
}

static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc)
{
	return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK;
}

int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_cbs_qopt_offload *cbs = type_data;
	u32 port_transmit_rate = priv->speed;
	u8 tc_nums = netdev_get_num_tc(ndev);
	struct enetc_si *si = priv->si;
	u32 hi_credit_bit, hi_credit_reg;
	u32 max_interference_size;
	u32 port_frame_max_size;
	u8 tc = cbs->queue;
	u8 prio_top, prio_next;
	int bw_sum = 0;
	u8 bw;

	prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1);
	prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2);

	/* Only the highest prio and second highest prio TCs support CBS */
	if (tc != prio_top && tc != prio_next)
		return -EOPNOTSUPP;

	if (!cbs->enable) {
		/* Make sure the other TCs that are numerically
		 * lower than this TC have been disabled.
		 */
		if (tc == prio_top &&
		    enetc_get_cbs_enable(&si->hw, prio_next)) {
			dev_err(&ndev->dev,
				"Disable TC%d before disabling TC%d\n",
				prio_next, tc);
			return -EINVAL;
		}

		enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0);
		enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0);

		return 0;
	}

	if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L ||
	    cbs->idleslope < 0 || cbs->sendslope > 0)
		return -EOPNOTSUPP;

	port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	bw = cbs->idleslope / (port_transmit_rate * 10UL);

	/* Make sure the other TCs that are numerically
	 * higher than this TC have been enabled.
	 */
	if (tc == prio_next) {
		if (!enetc_get_cbs_enable(&si->hw, prio_top)) {
			dev_err(&ndev->dev,
				"Enable TC%d before enabling TC%d\n",
				prio_top, prio_next);
			return -EINVAL;
		}
		bw_sum += enetc_get_cbs_bw(&si->hw, prio_top);
	}

	if (bw_sum + bw >= 100) {
		dev_err(&ndev->dev,
			"The sum of all CBS bandwidth cannot exceed 100\n");
		return -EINVAL;
	}

	enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));

	/* For the top prio TC, max_interference_size is maxSizedFrame.
	 *
	 * For the next prio TC, max_interference_size is calculated as:
	 *
	 *	max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra)
	 *
	 *	- Ra: idleSlope for AVB Class A
	 *	- R0: port transmit rate
	 *	- M0: maximum sized frame for the port
	 *	- Ma: maximum sized frame for AVB Class A
	 */

	if (tc == prio_top) {
		max_interference_size = port_frame_max_size * 8;
	} else {
		u32 m0, ma, r0, ra;

		m0 = port_frame_max_size * 8;
		ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8;
		ra = enetc_get_cbs_bw(&si->hw, prio_top) *
			port_transmit_rate * 10000ULL;
		r0 = port_transmit_rate * 1000000ULL;
		max_interference_size = m0 + ma +
			(u32)div_u64((u64)ra * m0, r0 - ra);
	}

	/* hiCredit bits are calculated as:
	 *
	 *	maxInterferenceSize * (idleSlope / portTxRate)
	 */
	hi_credit_bit = max_interference_size * bw / 100;

	/* Convert hiCredit bits to the hiCredit register value:
	 *
	 *	hi_credit_reg = hi_credit_bit *
	 *			(enetClockFrequency / portTransmitRate) * 100
	 */
	hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
				     port_transmit_rate * 1000000ULL);

	enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg);

	/* Set the bw register and enable this traffic class */
	enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);

	return 0;
}

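/* Worked example for the math above (illustrative numbers only): on a
 * 1000 Mbit/s port with MTU 1500, port_frame_max_size = 1500 + 18 + 4 =
 * 1522 bytes. For the top prio TC with idleslope = 300000 (kbit/s):
 *
 *	bw = 300000 / (1000 * 10) = 30 (percent)
 *	max_interference_size = 1522 * 8 = 12176 (bits)
 *	hi_credit_bit = 12176 * 30 / 100 = 3652 (bits)
 *
 * hi_credit_reg then rescales hi_credit_bit by enetClockFrequency /
 * portTransmitRate as per the formula above.
 */
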
int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_etf_qopt_offload *qopt = type_data;
	u8 tc_nums = netdev_get_num_tc(ndev);
	int tc;

	if (!tc_nums)
		return -EOPNOTSUPP;

	tc = qopt->queue;

	if (tc < 0 || tc >= priv->num_tx_rings)
		return -EINVAL;

	/* TXSTART and TX CSUM offload cannot be enabled simultaneously */
	if (ndev->features & NETIF_F_CSUM_MASK)
		return -EBUSY;

	/* TSD and Qbv are mutually exclusive in hardware */
	if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
		return -EBUSY;

	priv->tx_ring[tc]->tsd_enable = qopt->enable;
	enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc),
		      qopt->enable ? ENETC_TSDE : 0);

	return 0;
}

enum streamid_type {
	STREAMID_TYPE_RESERVED = 0,
	STREAMID_TYPE_NULL,
	STREAMID_TYPE_SMAC,
};

enum streamid_vlan_tagged {
	STREAMID_VLAN_RESERVED = 0,
	STREAMID_VLAN_TAGGED,
	STREAMID_VLAN_UNTAGGED,
	STREAMID_VLAN_ALL,
};

#define ENETC_PSFP_WILDCARD -1
#define HANDLE_OFFSET 100

enum forward_type {
	FILTER_ACTION_TYPE_PSFP = BIT(0),
	FILTER_ACTION_TYPE_ACL = BIT(1),
	FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0),
};

/* This limits the output type for a given set of input actions */
struct actions_fwd {
	u64 actions;
	u64 keys;	/* include the required match keys */
	enum forward_type output;
};

struct psfp_streamfilter_counters {
	u64 matching_frames_count;
	u64 passing_frames_count;
	u64 not_passing_frames_count;
	u64 passing_sdu_count;
	u64 not_passing_sdu_count;
	u64 red_frames_count;
};

struct enetc_streamid {
	u32 index;
	union {
		u8 src_mac[6];
		u8 dst_mac[6];
	};
	u8 filtertype;
	u16 vid;
	u8 tagged;
	s32 handle;
};

struct enetc_psfp_filter {
	u32 index;
	s32 handle;
	s8 prio;
	u32 maxsdu;
	u32 gate_id;
	s32 meter_id;
	refcount_t refcount;
	struct hlist_node node;
};

struct enetc_psfp_gate {
	u32 index;
	s8 init_ipv;
	u64 basetime;
	u64 cycletime;
	u64 cycletimext;
	u32 num_entries;
	refcount_t refcount;
	struct hlist_node node;
	struct action_gate_entry entries[];
};

/* Only green color frames are enabled for now.
 * EIR/EBS, color blind mode, the couple flag etc. will be added once
 * the policing action offloads more parameters.
 */
struct enetc_psfp_meter {
	u32 index;
	u32 cir;
	u32 cbs;
	refcount_t refcount;
	struct hlist_node node;
};

#define ENETC_PSFP_FLAGS_FMI BIT(0)

struct enetc_stream_filter {
	struct enetc_streamid sid;
	u32 sfi_index;
	u32 sgi_index;
	u32 flags;
	u32 fmi_index;
	struct flow_stats stats;
	struct hlist_node node;
};

struct enetc_psfp {
	unsigned long dev_bitmap;
	unsigned long *psfp_sfi_bitmap;
	struct hlist_head stream_list;
	struct hlist_head psfp_filter_list;
	struct hlist_head psfp_gate_list;
	struct hlist_head psfp_meter_list;
	spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */
};

static struct actions_fwd enetc_act_fwd[] = {
	{
		BIT(FLOW_ACTION_GATE),
		BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
		FILTER_ACTION_TYPE_PSFP
	},
	{
		BIT(FLOW_ACTION_POLICE) |
		BIT(FLOW_ACTION_GATE),
		BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
		FILTER_ACTION_TYPE_PSFP
	},
	/* example for ACL actions */
	{
		BIT(FLOW_ACTION_DROP),
		0,
		FILTER_ACTION_TYPE_ACL
	}
};

static struct enetc_psfp epsfp = {
	.psfp_sfi_bitmap = NULL,
};

static LIST_HEAD(enetc_block_cb_list);

static inline int enetc_get_port(struct enetc_ndev_priv *priv)
{
	return priv->si->pdev->devfn & 0x7;
}

/* Stream Identity Entry Set Descriptor */
static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
				 struct enetc_streamid *sid,
				 u8 enable)
{
	struct enetc_cbd cbd = {.cmd = 0};
	struct streamid_data *si_data;
	struct streamid_conf *si_conf;
	u16 data_size;
	dma_addr_t dma;
	int err;

	if (sid->index >= priv->psfp_cap.max_streamid)
		return -EINVAL;

	if (sid->filtertype != STREAMID_TYPE_NULL &&
	    sid->filtertype != STREAMID_TYPE_SMAC)
		return -EOPNOTSUPP;

	/* Disable operation before enable */
	cbd.index = cpu_to_le16((u16)sid->index);
	cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
	cbd.status_flags = 0;

	data_size = sizeof(struct streamid_data);
	si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
	if (!si_data)
		return -ENOMEM;
	cbd.length = cpu_to_le16(data_size);

	dma = dma_map_single(&priv->si->pdev->dev, si_data,
			     data_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
		netdev_err(priv->si->ndev, "DMA mapping failed!\n");
		kfree(si_data);
		return -ENOMEM;
	}

	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
	eth_broadcast_addr(si_data->dmac);
	si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK
				+ ((0x3 << 14) | ENETC_CBDR_SID_VIDM));

	si_conf = &cbd.sid_set;
	/* Each entry supports a single port; select this port */
	si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv));
	si_conf->id_type = 1;
	si_conf->oui[2] = 0x0;
	si_conf->oui[1] = 0x80;
	si_conf->oui[0] = 0xC2;

	err = enetc_send_cmd(priv->si, &cbd);
	if (err) {
		err = -EINVAL;
		goto out;
	}

	if (!enable)
		goto out;

	/* Re-program the entry with enable set, in case the hardware
	 * flushed it meanwhile.
	 */
	memset(&cbd, 0, sizeof(cbd));

	cbd.index = cpu_to_le16((u16)sid->index);
	cbd.cmd = 0;
	cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
	cbd.status_flags = 0;

	si_conf->en = 0x80;
	si_conf->stream_handle = cpu_to_le32(sid->handle);
	si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv));
	si_conf->id_type = sid->filtertype;
	si_conf->oui[2] = 0x0;
	si_conf->oui[1] = 0x80;
	si_conf->oui[0] = 0xC2;

	memset(si_data, 0, data_size);

	cbd.length = cpu_to_le16(data_size);

	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));

	/* VIDM defaults to 1.
	 * VID Match. If set (b1) then the VID must match, otherwise
	 * any VID is considered a match. The VIDM setting is only used
	 * when TG is set to b01.
	 */
	if (si_conf->id_type == STREAMID_TYPE_NULL) {
		ether_addr_copy(si_data->dmac, sid->dst_mac);
		si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
				       ((((u16)(sid->tagged) & 0x3) << 14)
					| ENETC_CBDR_SID_VIDM);
	} else if (si_conf->id_type == STREAMID_TYPE_SMAC) {
		ether_addr_copy(si_data->smac, sid->src_mac);
		si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
				       ((((u16)(sid->tagged) & 0x3) << 14)
					| ENETC_CBDR_SID_VIDM);
	}

	err = enetc_send_cmd(priv->si, &cbd);
out:
	dma_unmap_single(&priv->si->pdev->dev, dma, data_size,
			 DMA_FROM_DEVICE);
	kfree(si_data);

	return err;
}

/* Stream Filter Instance Set Descriptor */
static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
				     struct enetc_psfp_filter *sfi,
				     u8 enable)
{
	struct enetc_cbd cbd = {.cmd = 0};
	struct sfi_conf *sfi_config;

	cbd.index = cpu_to_le16(sfi->index);
	cbd.cls = BDCR_CMD_STREAM_FILTER;
	cbd.status_flags = 0x80;
	cbd.length = cpu_to_le16(1);

	sfi_config = &cbd.sfi_conf;
	if (!enable)
		goto exit;

	sfi_config->en = 0x80;

	if (sfi->handle >= 0) {
		sfi_config->stream_handle =
			cpu_to_le32(sfi->handle);
		sfi_config->sthm |= 0x80;
	}

	sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
	sfi_config->input_ports = cpu_to_le32(1 << enetc_get_port(priv));

	/* The priority value which may be matched against the
	 * frame's priority value to determine a match for this entry.
	 */
	if (sfi->prio >= 0)
		sfi_config->multi |= (sfi->prio & 0x7) | 0x8;

	/* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
	 * field as being either an MSDU value or an index into the Flow
	 * Meter Instance table.
	 */
	if (sfi->maxsdu) {
		sfi_config->msdu =
			cpu_to_le16(sfi->maxsdu);
		sfi_config->multi |= 0x40;
	}

	if (sfi->meter_id >= 0) {
		sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id);
		sfi_config->multi |= 0x80;
	}

exit:
	return enetc_send_cmd(priv->si, &cbd);
}

static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
				      u32 index,
				      struct psfp_streamfilter_counters *cnt)
{
	struct enetc_cbd cbd = { .cmd = 2 };
	struct sfi_counter_data *data_buf;
	dma_addr_t dma;
	u16 data_size;
	int err;

	cbd.index = cpu_to_le16((u16)index);
	cbd.cmd = 2;
	cbd.cls = BDCR_CMD_STREAM_FILTER;
	cbd.status_flags = 0;

	data_size = sizeof(struct sfi_counter_data);
	data_buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	dma = dma_map_single(&priv->si->pdev->dev, data_buf,
			     data_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
		netdev_err(priv->si->ndev, "DMA mapping failed!\n");
		kfree(data_buf);
		return -ENOMEM;
	}
	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));

	cbd.length = cpu_to_le16(data_size);

	err = enetc_send_cmd(priv->si, &cbd);
	if (err)
		goto exit;

	cnt->matching_frames_count = ((u64)data_buf->matchh << 32) +
				     data_buf->matchl;

	cnt->not_passing_sdu_count = ((u64)data_buf->msdu_droph << 32) +
				     data_buf->msdu_dropl;

	cnt->passing_sdu_count = cnt->matching_frames_count
				- cnt->not_passing_sdu_count;

	cnt->not_passing_frames_count =
		((u64)data_buf->stream_gate_droph << 32) +
		data_buf->stream_gate_dropl;

	cnt->passing_frames_count = cnt->matching_frames_count -
				    cnt->not_passing_sdu_count -
				    cnt->not_passing_frames_count;

	cnt->red_frames_count = ((u64)data_buf->flow_meter_droph << 32) +
				data_buf->flow_meter_dropl;

exit:
	dma_unmap_single(&priv->si->pdev->dev, dma, data_size,
			 DMA_FROM_DEVICE);
	kfree(data_buf);
	return err;
}

static u64 get_ptp_now(struct enetc_hw *hw)
{
	u64 now_lo, now_hi, now;

	now_lo = enetc_rd(hw, ENETC_SICTR0);
	now_hi = enetc_rd(hw, ENETC_SICTR1);
	now = now_lo | now_hi << 32;

	return now;
}

static int get_start_ns(u64 now, u64 cycle, u64 *start)
{
	u64 n;

	if (!cycle)
		return -EFAULT;

	n = div64_u64(now, cycle);

	*start = (n + 1) * cycle;

	return 0;
}

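/* The stream gate needs a start time in the future. When the requested
 * basetime is already in the past, enetc_streamgate_hw_set() below uses
 * get_start_ns() above to align the start to the next cycle boundary:
 * e.g. with now = 1000000000 ns and cycle = 300000000 ns, n = 3 and the
 * gate list starts at 1200000000 ns.
 */
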
/* Stream Gate Instance Set Descriptor */
static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
				   struct enetc_psfp_gate *sgi,
				   u8 enable)
{
	struct enetc_cbd cbd = { .cmd = 0 };
	struct sgi_table *sgi_config;
	struct sgcl_conf *sgcl_config;
	struct sgcl_data *sgcl_data;
	struct sgce *sgce;
	dma_addr_t dma;
	u16 data_size;
	int err, i;
	u64 now;

	cbd.index = cpu_to_le16(sgi->index);
	cbd.cmd = 0;
	cbd.cls = BDCR_CMD_STREAM_GCL;
	cbd.status_flags = 0x80;

	/* disable */
	if (!enable)
		return enetc_send_cmd(priv->si, &cbd);

	if (!sgi->num_entries)
		return 0;

	if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist ||
	    !sgi->cycletime)
		return -EINVAL;

	/* enable */
	sgi_config = &cbd.sgi_table;

	/* Keep the gate open before the gate list starts */
	sgi_config->ocgtst = 0x80;

	sgi_config->oipv = (sgi->init_ipv < 0) ?
			   0x0 : ((sgi->init_ipv & 0x7) | 0x8);

	sgi_config->en = 0x80;

	/* Basic config */
	err = enetc_send_cmd(priv->si, &cbd);
	if (err)
		return -EINVAL;

	memset(&cbd, 0, sizeof(cbd));

	cbd.index = cpu_to_le16(sgi->index);
	cbd.cmd = 1;
	cbd.cls = BDCR_CMD_STREAM_GCL;
	cbd.status_flags = 0;

	sgcl_config = &cbd.sgcl_conf;

	sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;

	data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);

	sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
	if (!sgcl_data)
		return -ENOMEM;

	cbd.length = cpu_to_le16(data_size);

	dma = dma_map_single(&priv->si->pdev->dev,
			     sgcl_data, data_size,
			     DMA_FROM_DEVICE);
	if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
		netdev_err(priv->si->ndev, "DMA mapping failed!\n");
		kfree(sgcl_data);
		return -ENOMEM;
	}

	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));

	sgce = &sgcl_data->sgcl[0];

	sgcl_config->agtst = 0x80;

	sgcl_data->ct = sgi->cycletime;
	sgcl_data->cte = sgi->cycletimext;

	if (sgi->init_ipv >= 0)
		sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8;

	for (i = 0; i < sgi->num_entries; i++) {
		struct action_gate_entry *from = &sgi->entries[i];
		struct sgce *to = &sgce[i];

		if (from->gate_state)
			to->multi |= 0x10;

		if (from->ipv >= 0)
			to->multi |= ((from->ipv & 0x7) << 5) | 0x08;

		if (from->maxoctets >= 0) {
			to->multi |= 0x01;
			to->msdu[0] = from->maxoctets & 0xFF;
			to->msdu[1] = (from->maxoctets >> 8) & 0xFF;
			to->msdu[2] = (from->maxoctets >> 16) & 0xFF;
		}

		to->interval = from->interval;
	}

	/* If basetime is less than now, calculate the start time */
	now = get_ptp_now(&priv->si->hw);

	if (sgi->basetime < now) {
		u64 start;

		err = get_start_ns(now, sgi->cycletime, &start);
		if (err)
			goto exit;
		sgcl_data->btl = lower_32_bits(start);
		sgcl_data->bth = upper_32_bits(start);
	} else {
		u32 hi, lo;

		hi = upper_32_bits(sgi->basetime);
		lo = lower_32_bits(sgi->basetime);
		sgcl_data->bth = hi;
		sgcl_data->btl = lo;
	}

	err = enetc_send_cmd(priv->si, &cbd);

exit:
	dma_unmap_single(&priv->si->pdev->dev, dma, data_size,
			 DMA_FROM_DEVICE);
	kfree(sgcl_data);

	return err;
}

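/* Flow meter rate conversion: fmi->cir holds the police action's
 * rate_bytes_ps, while the CIR register takes the rate in hardware
 * units, hence cir * 8000 / 3725 below (bytes/s * 8 = bit/s, divided
 * by 3.725). E.g. 125000000 bytes/s (1 Gbit/s) programs roughly
 * 1000000000 / 3.725 ~= 268456375. The exact register granularity
 * comes from the ENETC reference manual.
 */
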
static int enetc_flowmeter_hw_set(struct enetc_ndev_priv *priv,
				  struct enetc_psfp_meter *fmi,
				  u8 enable)
{
	struct enetc_cbd cbd = { .cmd = 0 };
	struct fmi_conf *fmi_config;
	u64 temp = 0;

	cbd.index = cpu_to_le16((u16)fmi->index);
	cbd.cls = BDCR_CMD_FLOW_METER;
	cbd.status_flags = 0x80;

	if (!enable)
		return enetc_send_cmd(priv->si, &cbd);

	fmi_config = &cbd.fmi_conf;
	fmi_config->en = 0x80;

	if (fmi->cir) {
		temp = (u64)8000 * fmi->cir;
		temp = div_u64(temp, 3725);
	}

	fmi_config->cir = cpu_to_le32((u32)temp);
	fmi_config->cbs = cpu_to_le32(fmi->cbs);

	/* EIR and EBS are disabled by default */
	fmi_config->eir = 0;
	fmi_config->ebs = 0;

	/* Defaults:
	 * mark red disabled
	 * drop on yellow disabled
	 * color mode disabled
	 * couple flag disabled
	 */
	fmi_config->conf = 0;

	return enetc_send_cmd(priv->si, &cbd);
}

static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index)
{
	struct enetc_stream_filter *f;

	hlist_for_each_entry(f, &epsfp.stream_list, node)
		if (f->sid.index == index)
			return f;

	return NULL;
}

static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index)
{
	struct enetc_psfp_gate *g;

	hlist_for_each_entry(g, &epsfp.psfp_gate_list, node)
		if (g->index == index)
			return g;

	return NULL;
}

static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index)
{
	struct enetc_psfp_filter *s;

	hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
		if (s->index == index)
			return s;

	return NULL;
}

static struct enetc_psfp_meter *enetc_get_meter_by_index(u32 index)
{
	struct enetc_psfp_meter *m;

	hlist_for_each_entry(m, &epsfp.psfp_meter_list, node)
		if (m->index == index)
			return m;

	return NULL;
}

static struct enetc_psfp_filter
	*enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi)
{
	struct enetc_psfp_filter *s;

	hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
		if (s->gate_id == sfi->gate_id &&
		    s->prio == sfi->prio &&
		    s->maxsdu == sfi->maxsdu &&
		    s->meter_id == sfi->meter_id)
			return s;

	return NULL;
}

static int enetc_get_free_index(struct enetc_ndev_priv *priv)
{
	u32 max_size = priv->psfp_cap.max_psfp_filter;
	unsigned long index;

	index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size);
	if (index == max_size)
		return -1;

	return index;
}

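/* Stream filters, gates and meters can be shared by multiple streams and
 * are refcounted; the unref helpers below drop one reference and only
 * disable the hardware entry and free the node on the last put.
 */
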
static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index)
{
	struct enetc_psfp_filter *sfi;
	u8 z;

	sfi = enetc_get_filter_by_index(index);
	WARN_ON(!sfi);
	z = refcount_dec_and_test(&sfi->refcount);

	if (z) {
		enetc_streamfilter_hw_set(priv, sfi, false);
		hlist_del(&sfi->node);
		kfree(sfi);
		clear_bit(index, epsfp.psfp_sfi_bitmap);
	}
}

static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index)
{
	struct enetc_psfp_gate *sgi;
	u8 z;

	sgi = enetc_get_gate_by_index(index);
	WARN_ON(!sgi);
	z = refcount_dec_and_test(&sgi->refcount);
	if (z) {
		enetc_streamgate_hw_set(priv, sgi, false);
		hlist_del(&sgi->node);
		kfree(sgi);
	}
}

static void flow_meter_unref(struct enetc_ndev_priv *priv, u32 index)
{
	struct enetc_psfp_meter *fmi;
	u8 z;

	fmi = enetc_get_meter_by_index(index);
	WARN_ON(!fmi);
	z = refcount_dec_and_test(&fmi->refcount);
	if (z) {
		enetc_flowmeter_hw_set(priv, fmi, false);
		hlist_del(&fmi->node);
		kfree(fmi);
	}
}

static void remove_one_chain(struct enetc_ndev_priv *priv,
			     struct enetc_stream_filter *filter)
{
	if (filter->flags & ENETC_PSFP_FLAGS_FMI)
		flow_meter_unref(priv, filter->fmi_index);

	stream_gate_unref(priv, filter->sgi_index);
	stream_filter_unref(priv, filter->sfi_index);

	hlist_del(&filter->node);
	kfree(filter);
}

static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv,
			     struct enetc_streamid *sid,
			     struct enetc_psfp_filter *sfi,
			     struct enetc_psfp_gate *sgi,
			     struct enetc_psfp_meter *fmi)
{
	int err;

	err = enetc_streamid_hw_set(priv, sid, true);
	if (err)
		return err;

	if (sfi) {
		err = enetc_streamfilter_hw_set(priv, sfi, true);
		if (err)
			goto revert_sid;
	}

	err = enetc_streamgate_hw_set(priv, sgi, true);
	if (err)
		goto revert_sfi;

	if (fmi) {
		err = enetc_flowmeter_hw_set(priv, fmi, true);
		if (err)
			goto revert_sgi;
	}

	return 0;

revert_sgi:
	enetc_streamgate_hw_set(priv, sgi, false);
revert_sfi:
	if (sfi)
		enetc_streamfilter_hw_set(priv, sfi, false);
revert_sid:
	enetc_streamid_hw_set(priv, sid, false);
	return err;
}

static struct actions_fwd *enetc_check_flow_actions(u64 acts,
						    unsigned int inputkeys)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++)
		if (acts == enetc_act_fwd[i].actions &&
		    inputkeys & enetc_act_fwd[i].keys)
			return &enetc_act_fwd[i];

	return NULL;
}

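/* Translate one tc flower rule into PSFP entries. The chain index selects
 * the stream identity entry, the ETH_ADDRS/VLAN match fills struct
 * enetc_streamid, the gate action becomes a stream gate instance and an
 * optional police action becomes a flow meter plus a maximum SDU size.
 * A rule of this shape could look like (values are made up, see
 * tc-gate(8) for the action syntax):
 *
 *	tc filter add dev eno0 ingress chain 1 protocol 802.1Q flower \
 *		skip_sw dst_mac 00:01:02:03:04:05 vlan_id 10 \
 *		action gate index 1 base-time 0 \
 *		sched-entry open 200000000 -1 -1 \
 *		sched-entry close 100000000 -1 -1
 */
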
static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
				      struct flow_cls_offload *f)
{
	struct flow_action_entry *entryg = NULL, *entryp = NULL;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct enetc_stream_filter *filter, *old_filter;
	struct enetc_psfp_meter *fmi = NULL, *old_fmi;
	struct enetc_psfp_filter *sfi, *old_sfi;
	struct enetc_psfp_gate *sgi, *old_sgi;
	struct flow_action_entry *entry;
	struct action_gate_entry *e;
	u8 sfi_overwrite = 0;
	int entries_size;
	int i, err;

	if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
		NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
		return -ENOSPC;
	}

	flow_action_for_each(i, entry, &rule->action)
		if (entry->id == FLOW_ACTION_GATE)
			entryg = entry;
		else if (entry->id == FLOW_ACTION_POLICE)
			entryp = entry;

	/* A gate action is mandatory; reject rules without one */
	if (!entryg)
		return -EINVAL;

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	filter->sid.index = f->common.chain_index;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!is_zero_ether_addr(match.mask->dst) &&
		    !is_zero_ether_addr(match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Cannot match on both source and destination MAC");
			err = -EINVAL;
			goto free_filter;
		}

		if (!is_zero_ether_addr(match.mask->dst)) {
			if (!is_broadcast_ether_addr(match.mask->dst)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Masked matching on destination MAC not supported");
				err = -EINVAL;
				goto free_filter;
			}
			ether_addr_copy(filter->sid.dst_mac, match.key->dst);
			filter->sid.filtertype = STREAMID_TYPE_NULL;
		}

		if (!is_zero_ether_addr(match.mask->src)) {
			if (!is_broadcast_ether_addr(match.mask->src)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Masked matching on source MAC not supported");
				err = -EINVAL;
				goto free_filter;
			}
			ether_addr_copy(filter->sid.src_mac, match.key->src);
			filter->sid.filtertype = STREAMID_TYPE_SMAC;
		}
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS");
		err = -EINVAL;
		goto free_filter;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (match.mask->vlan_priority) {
			if (match.mask->vlan_priority !=
			    (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) {
				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
				err = -EINVAL;
				goto free_filter;
			}
		}

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id != VLAN_VID_MASK) {
				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id");
				err = -EINVAL;
				goto free_filter;
			}

			filter->sid.vid = match.key->vlan_id;
			if (!filter->sid.vid)
				filter->sid.tagged = STREAMID_VLAN_UNTAGGED;
			else
				filter->sid.tagged = STREAMID_VLAN_TAGGED;
		}
	} else {
		filter->sid.tagged = STREAMID_VLAN_ALL;
	}

	/* parsing gate action */
	if (entryg->gate.index >= priv->psfp_cap.max_psfp_gate) {
		NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
		err = -ENOSPC;
		goto free_filter;
	}

	if (entryg->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) {
		NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
		err = -ENOSPC;
		goto free_filter;
	}

	entries_size = struct_size(sgi, entries, entryg->gate.num_entries);
	sgi = kzalloc(entries_size, GFP_KERNEL);
	if (!sgi) {
		err = -ENOMEM;
		goto free_filter;
	}

	refcount_set(&sgi->refcount, 1);
	sgi->index = entryg->gate.index;
	sgi->init_ipv = entryg->gate.prio;
	sgi->basetime = entryg->gate.basetime;
	sgi->cycletime = entryg->gate.cycletime;
	sgi->num_entries = entryg->gate.num_entries;

	e = sgi->entries;
	for (i = 0; i < entryg->gate.num_entries; i++) {
		e[i].gate_state = entryg->gate.entries[i].gate_state;
		e[i].interval = entryg->gate.entries[i].interval;
		e[i].ipv = entryg->gate.entries[i].ipv;
		e[i].maxoctets = entryg->gate.entries[i].maxoctets;
	}

	filter->sgi_index = sgi->index;

	sfi = kzalloc(sizeof(*sfi), GFP_KERNEL);
	if (!sfi) {
		err = -ENOMEM;
		goto free_gate;
	}

	refcount_set(&sfi->refcount, 1);
	sfi->gate_id = sgi->index;
	sfi->meter_id = ENETC_PSFP_WILDCARD;

	/* Flow meter and max frame size */
	if (entryp) {
		if (entryp->police.burst) {
			fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
			if (!fmi) {
				err = -ENOMEM;
				goto free_sfi;
			}
			refcount_set(&fmi->refcount, 1);
			fmi->cir = entryp->police.rate_bytes_ps;
			fmi->cbs = entryp->police.burst;
			fmi->index = entryp->police.index;
			filter->flags |= ENETC_PSFP_FLAGS_FMI;
			filter->fmi_index = fmi->index;
			sfi->meter_id = fmi->index;
		}

		if (entryp->police.mtu)
			sfi->maxsdu = entryp->police.mtu;
	}

	/* The tc filter prio (1..8) maps to the stream filter prio (0..7);
	 * anything else is treated as a wildcard.
	 */
	if (f->common.prio && f->common.prio <= BIT(3))
		sfi->prio = f->common.prio - 1;
	else
		sfi->prio = ENETC_PSFP_WILDCARD;

	old_sfi = enetc_psfp_check_sfi(sfi);
	if (!old_sfi) {
		int index;

		index = enetc_get_free_index(priv);
		if (index < 0) {
			NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
			err = -ENOSPC;
			goto free_fmi;
		}

		sfi->index = index;
		sfi->handle = index + HANDLE_OFFSET;
		/* Update the stream filter handle also */
		filter->sid.handle = sfi->handle;
		filter->sfi_index = sfi->index;
		sfi_overwrite = 0;
	} else {
		filter->sfi_index = old_sfi->index;
		filter->sid.handle = old_sfi->handle;
		sfi_overwrite = 1;
	}

	err = enetc_psfp_hw_set(priv, &filter->sid,
				sfi_overwrite ? NULL : sfi, sgi, fmi);
	if (err)
		goto free_fmi;

	spin_lock(&epsfp.psfp_lock);
	if (filter->flags & ENETC_PSFP_FLAGS_FMI) {
		old_fmi = enetc_get_meter_by_index(filter->fmi_index);
		if (old_fmi) {
			fmi->refcount = old_fmi->refcount;
			refcount_set(&fmi->refcount,
				     refcount_read(&old_fmi->refcount) + 1);
			hlist_del(&old_fmi->node);
			kfree(old_fmi);
		}
		hlist_add_head(&fmi->node, &epsfp.psfp_meter_list);
	}

	/* Remove the old node if it exists and replace it with the new one */
	old_sgi = enetc_get_gate_by_index(filter->sgi_index);
	if (old_sgi) {
		refcount_set(&sgi->refcount,
			     refcount_read(&old_sgi->refcount) + 1);
		hlist_del(&old_sgi->node);
		kfree(old_sgi);
	}

	hlist_add_head(&sgi->node, &epsfp.psfp_gate_list);

	if (!old_sfi) {
		hlist_add_head(&sfi->node, &epsfp.psfp_filter_list);
		set_bit(sfi->index, epsfp.psfp_sfi_bitmap);
	} else {
		kfree(sfi);
		refcount_inc(&old_sfi->refcount);
	}

	old_filter = enetc_get_stream_by_index(filter->sid.index);
	if (old_filter)
		remove_one_chain(priv, old_filter);

	filter->stats.lastused = jiffies;
	hlist_add_head(&filter->node, &epsfp.stream_list);

	spin_unlock(&epsfp.psfp_lock);

	return 0;

free_fmi:
	kfree(fmi);
free_sfi:
	kfree(sfi);
free_gate:
	kfree(sgi);
free_filter:
	kfree(filter);

	return err;
}

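/* enetc_config_clsflower() below validates the rule's action set and match
 * keys against the enetc_act_fwd table before dispatching to the PSFP
 * parser; action combinations outside that table are rejected.
 */
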
static int enetc_config_clsflower(struct enetc_ndev_priv *priv,
				  struct flow_cls_offload *cls_flower)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
	struct netlink_ext_ack *extack = cls_flower->common.extack;
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_action *action = &rule->action;
	struct flow_action_entry *entry;
	struct actions_fwd *fwd;
	u64 actions = 0;
	int i, err;

	if (!flow_action_has_entries(action)) {
		NL_SET_ERR_MSG_MOD(extack, "At least one action is needed");
		return -EINVAL;
	}

	flow_action_for_each(i, entry, action)
		actions |= BIT(entry->id);

	fwd = enetc_check_flow_actions(actions, dissector->used_keys);
	if (!fwd) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!");
		return -EOPNOTSUPP;
	}

	if (fwd->output & FILTER_ACTION_TYPE_PSFP) {
		err = enetc_psfp_parse_clsflower(priv, cls_flower);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs");
			return err;
		}
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported actions");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv,
					struct flow_cls_offload *f)
{
	struct enetc_stream_filter *filter;
	struct netlink_ext_ack *extack = f->common.extack;
	int err;

	if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
		NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
		return -ENOSPC;
	}

	filter = enetc_get_stream_by_index(f->common.chain_index);
	if (!filter)
		return -EINVAL;

	err = enetc_streamid_hw_set(priv, &filter->sid, false);
	if (err)
		return err;

	remove_one_chain(priv, filter);

	return 0;
}

static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv,
				   struct flow_cls_offload *f)
{
	return enetc_psfp_destroy_clsflower(priv, f);
}

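/* Hardware counters are cumulative, but flow_stats_update() expects
 * deltas, so enetc_psfp_get_stats() keeps the last reported totals in
 * filter->stats and subtracts them from the fresh readback under
 * psfp_lock.
 */
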
static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv,
				struct flow_cls_offload *f)
{
	struct psfp_streamfilter_counters counters = {};
	struct enetc_stream_filter *filter;
	struct flow_stats stats = {};
	int err;

	filter = enetc_get_stream_by_index(f->common.chain_index);
	if (!filter)
		return -EINVAL;

	err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters);
	if (err)
		return -EINVAL;

	spin_lock(&epsfp.psfp_lock);
	stats.pkts = counters.matching_frames_count +
		     counters.not_passing_sdu_count -
		     filter->stats.pkts;
	stats.drops = counters.not_passing_frames_count +
		      counters.not_passing_sdu_count +
		      counters.red_frames_count -
		      filter->stats.drops;
	stats.lastused = filter->stats.lastused;
	filter->stats.pkts += stats.pkts;
	filter->stats.drops += stats.drops;
	spin_unlock(&epsfp.psfp_lock);

	flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops,
			  stats.lastused, FLOW_ACTION_HW_STATS_DELAYED);

	return 0;
}

static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv,
				     struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return enetc_config_clsflower(priv, cls_flower);
	case FLOW_CLS_DESTROY:
		return enetc_destroy_clsflower(priv, cls_flower);
	case FLOW_CLS_STATS:
		return enetc_psfp_get_stats(priv, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static inline void clean_psfp_sfi_bitmap(void)
{
	bitmap_free(epsfp.psfp_sfi_bitmap);
	epsfp.psfp_sfi_bitmap = NULL;
}

static void clean_stream_list(void)
{
	struct enetc_stream_filter *s;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) {
		hlist_del(&s->node);
		kfree(s);
	}
}

static void clean_sfi_list(void)
{
	struct enetc_psfp_filter *sfi;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) {
		hlist_del(&sfi->node);
		kfree(sfi);
	}
}

static void clean_sgi_list(void)
{
	struct enetc_psfp_gate *sgi;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) {
		hlist_del(&sgi->node);
		kfree(sgi);
	}
}

static void clean_psfp_all(void)
{
	/* Disable all list nodes and free all memory */
	clean_sfi_list();
	clean_sgi_list();
	clean_stream_list();
	epsfp.dev_bitmap = 0;
	clean_psfp_sfi_bitmap();
}

int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	struct net_device *ndev = cb_priv;

	if (!tc_can_offload(ndev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data);
	default:
		return -EOPNOTSUPP;
	}
}

int enetc_psfp_init(struct enetc_ndev_priv *priv)
{
	if (epsfp.psfp_sfi_bitmap)
		return 0;

	epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter,
					      GFP_KERNEL);
	if (!epsfp.psfp_sfi_bitmap)
		return -ENOMEM;

	spin_lock_init(&epsfp.psfp_lock);

	if (list_empty(&enetc_block_cb_list))
		epsfp.dev_bitmap = 0;

	return 0;
}

int enetc_psfp_clean(struct enetc_ndev_priv *priv)
{
	if (!list_empty(&enetc_block_cb_list))
		return -EBUSY;

	clean_psfp_all();

	return 0;
}

int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct flow_block_offload *f = type_data;
	int err;

	err = flow_block_cb_setup_simple(f, &enetc_block_cb_list,
					 enetc_setup_tc_block_cb,
					 ndev, ndev, true);
	if (err)
		return err;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		set_bit(enetc_get_port(priv), &epsfp.dev_bitmap);
		break;
	case FLOW_BLOCK_UNBIND:
		clear_bit(enetc_get_port(priv), &epsfp.dev_bitmap);
		if (!epsfp.dev_bitmap)
			clean_psfp_all();
		break;
	}

	return 0;
}