1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 2 /* Copyright 2019 NXP */ 3 4 #include "enetc.h" 5 6 #include <net/pkt_sched.h> 7 #include <linux/math64.h> 8 #include <linux/refcount.h> 9 #include <net/pkt_cls.h> 10 #include <net/tc_act/tc_gate.h> 11 12 static u16 enetc_get_max_gcl_len(struct enetc_hw *hw) 13 { 14 return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET) 15 & ENETC_QBV_MAX_GCL_LEN_MASK; 16 } 17 18 void enetc_sched_speed_set(struct net_device *ndev) 19 { 20 struct enetc_ndev_priv *priv = netdev_priv(ndev); 21 struct phy_device *phydev = ndev->phydev; 22 u32 old_speed = priv->speed; 23 u32 speed, pspeed; 24 25 if (phydev->speed == old_speed) 26 return; 27 28 speed = phydev->speed; 29 switch (speed) { 30 case SPEED_1000: 31 pspeed = ENETC_PMR_PSPEED_1000M; 32 break; 33 case SPEED_2500: 34 pspeed = ENETC_PMR_PSPEED_2500M; 35 break; 36 case SPEED_100: 37 pspeed = ENETC_PMR_PSPEED_100M; 38 break; 39 case SPEED_10: 40 default: 41 pspeed = ENETC_PMR_PSPEED_10M; 42 } 43 44 priv->speed = speed; 45 enetc_port_wr(&priv->si->hw, ENETC_PMR, 46 (enetc_port_rd(&priv->si->hw, ENETC_PMR) 47 & (~ENETC_PMR_PSPEED_MASK)) 48 | pspeed); 49 } 50 51 static int enetc_setup_taprio(struct net_device *ndev, 52 struct tc_taprio_qopt_offload *admin_conf) 53 { 54 struct enetc_ndev_priv *priv = netdev_priv(ndev); 55 struct enetc_cbd cbd = {.cmd = 0}; 56 struct tgs_gcl_conf *gcl_config; 57 struct tgs_gcl_data *gcl_data; 58 struct gce *gce; 59 dma_addr_t dma; 60 u16 data_size; 61 u16 gcl_len; 62 u32 tge; 63 int err; 64 int i; 65 66 if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw)) 67 return -EINVAL; 68 gcl_len = admin_conf->num_entries; 69 70 tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET); 71 if (!admin_conf->enable) { 72 enetc_wr(&priv->si->hw, 73 ENETC_QBV_PTGCR_OFFSET, 74 tge & (~ENETC_QBV_TGE)); 75 return 0; 76 } 77 78 if (admin_conf->cycle_time > U32_MAX || 79 admin_conf->cycle_time_extension > U32_MAX) 80 return -EINVAL; 81 82 /* Configure the (administrative) gate control list using the 83 * control BD descriptor. 84 */ 85 gcl_config = &cbd.gcl_conf; 86 87 data_size = struct_size(gcl_data, entry, gcl_len); 88 gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); 89 if (!gcl_data) 90 return -ENOMEM; 91 92 gce = (struct gce *)(gcl_data + 1); 93 94 /* Set all gates open as default */ 95 gcl_config->atc = 0xff; 96 gcl_config->acl_len = cpu_to_le16(gcl_len); 97 98 if (!admin_conf->base_time) { 99 gcl_data->btl = 100 cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0)); 101 gcl_data->bth = 102 cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1)); 103 } else { 104 gcl_data->btl = 105 cpu_to_le32(lower_32_bits(admin_conf->base_time)); 106 gcl_data->bth = 107 cpu_to_le32(upper_32_bits(admin_conf->base_time)); 108 } 109 110 gcl_data->ct = cpu_to_le32(admin_conf->cycle_time); 111 gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension); 112 113 for (i = 0; i < gcl_len; i++) { 114 struct tc_taprio_sched_entry *temp_entry; 115 struct gce *temp_gce = gce + i; 116 117 temp_entry = &admin_conf->entries[i]; 118 119 temp_gce->gate = (u8)temp_entry->gate_mask; 120 temp_gce->period = cpu_to_le32(temp_entry->interval); 121 } 122 123 cbd.length = cpu_to_le16(data_size); 124 cbd.status_flags = 0; 125 126 dma = dma_map_single(&priv->si->pdev->dev, gcl_data, 127 data_size, DMA_TO_DEVICE); 128 if (dma_mapping_error(&priv->si->pdev->dev, dma)) { 129 netdev_err(priv->si->ndev, "DMA mapping failed!\n"); 130 kfree(gcl_data); 131 return -ENOMEM; 132 } 133 134 cbd.addr[0] = lower_32_bits(dma); 135 cbd.addr[1] = upper_32_bits(dma); 136 cbd.cls = BDCR_CMD_PORT_GCL; 137 cbd.status_flags = 0; 138 139 enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET, 140 tge | ENETC_QBV_TGE); 141 142 err = enetc_send_cmd(priv->si, &cbd); 143 if (err) 144 enetc_wr(&priv->si->hw, 145 ENETC_QBV_PTGCR_OFFSET, 146 tge & (~ENETC_QBV_TGE)); 147 148 dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE); 149 kfree(gcl_data); 150 151 return err; 152 } 153 154 int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data) 155 { 156 struct tc_taprio_qopt_offload *taprio = type_data; 157 struct enetc_ndev_priv *priv = netdev_priv(ndev); 158 int err; 159 int i; 160 161 /* TSD and Qbv are mutually exclusive in hardware */ 162 for (i = 0; i < priv->num_tx_rings; i++) 163 if (priv->tx_ring[i]->tsd_enable) 164 return -EBUSY; 165 166 for (i = 0; i < priv->num_tx_rings; i++) 167 enetc_set_bdr_prio(&priv->si->hw, 168 priv->tx_ring[i]->index, 169 taprio->enable ? i : 0); 170 171 err = enetc_setup_taprio(ndev, taprio); 172 173 if (err) 174 for (i = 0; i < priv->num_tx_rings; i++) 175 enetc_set_bdr_prio(&priv->si->hw, 176 priv->tx_ring[i]->index, 177 taprio->enable ? 0 : i); 178 179 return err; 180 } 181 182 static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc) 183 { 184 return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE; 185 } 186 187 static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc) 188 { 189 return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK; 190 } 191 192 int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) 193 { 194 struct enetc_ndev_priv *priv = netdev_priv(ndev); 195 struct tc_cbs_qopt_offload *cbs = type_data; 196 u32 port_transmit_rate = priv->speed; 197 u8 tc_nums = netdev_get_num_tc(ndev); 198 struct enetc_si *si = priv->si; 199 u32 hi_credit_bit, hi_credit_reg; 200 u32 max_interference_size; 201 u32 port_frame_max_size; 202 u8 tc = cbs->queue; 203 u8 prio_top, prio_next; 204 int bw_sum = 0; 205 u8 bw; 206 207 prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1); 208 prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2); 209 210 /* Support highest prio and second prio tc in cbs mode */ 211 if (tc != prio_top && tc != prio_next) 212 return -EOPNOTSUPP; 213 214 if (!cbs->enable) { 215 /* Make sure the other TC that are numerically 216 * lower than this TC have been disabled. 217 */ 218 if (tc == prio_top && 219 enetc_get_cbs_enable(&si->hw, prio_next)) { 220 dev_err(&ndev->dev, 221 "Disable TC%d before disable TC%d\n", 222 prio_next, tc); 223 return -EINVAL; 224 } 225 226 enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0); 227 enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0); 228 229 return 0; 230 } 231 232 if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L || 233 cbs->idleslope < 0 || cbs->sendslope > 0) 234 return -EOPNOTSUPP; 235 236 port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; 237 238 bw = cbs->idleslope / (port_transmit_rate * 10UL); 239 240 /* Make sure the other TC that are numerically 241 * higher than this TC have been enabled. 242 */ 243 if (tc == prio_next) { 244 if (!enetc_get_cbs_enable(&si->hw, prio_top)) { 245 dev_err(&ndev->dev, 246 "Enable TC%d first before enable TC%d\n", 247 prio_top, prio_next); 248 return -EINVAL; 249 } 250 bw_sum += enetc_get_cbs_bw(&si->hw, prio_top); 251 } 252 253 if (bw_sum + bw >= 100) { 254 dev_err(&ndev->dev, 255 "The sum of all CBS Bandwidth can't exceed 100\n"); 256 return -EINVAL; 257 } 258 259 enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc)); 260 261 /* For top prio TC, the max_interfrence_size is maxSizedFrame. 262 * 263 * For next prio TC, the max_interfrence_size is calculated as below: 264 * 265 * max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra) 266 * 267 * - RA: idleSlope for AVB Class A 268 * - R0: port transmit rate 269 * - M0: maximum sized frame for the port 270 * - MA: maximum sized frame for AVB Class A 271 */ 272 273 if (tc == prio_top) { 274 max_interference_size = port_frame_max_size * 8; 275 } else { 276 u32 m0, ma, r0, ra; 277 278 m0 = port_frame_max_size * 8; 279 ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8; 280 ra = enetc_get_cbs_bw(&si->hw, prio_top) * 281 port_transmit_rate * 10000ULL; 282 r0 = port_transmit_rate * 1000000ULL; 283 max_interference_size = m0 + ma + 284 (u32)div_u64((u64)ra * m0, r0 - ra); 285 } 286 287 /* hiCredit bits calculate by: 288 * 289 * maxSizedFrame * (idleSlope/portTxRate) 290 */ 291 hi_credit_bit = max_interference_size * bw / 100; 292 293 /* hiCredit bits to hiCredit register need to calculated as: 294 * 295 * (enetClockFrequency / portTransmitRate) * 100 296 */ 297 hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit, 298 port_transmit_rate * 1000000ULL); 299 300 enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg); 301 302 /* Set bw register and enable this traffic class */ 303 enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE); 304 305 return 0; 306 } 307 308 int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data) 309 { 310 struct enetc_ndev_priv *priv = netdev_priv(ndev); 311 struct tc_etf_qopt_offload *qopt = type_data; 312 u8 tc_nums = netdev_get_num_tc(ndev); 313 int tc; 314 315 if (!tc_nums) 316 return -EOPNOTSUPP; 317 318 tc = qopt->queue; 319 320 if (tc < 0 || tc >= priv->num_tx_rings) 321 return -EINVAL; 322 323 /* Do not support TXSTART and TX CSUM offload simutaniously */ 324 if (ndev->features & NETIF_F_CSUM_MASK) 325 return -EBUSY; 326 327 /* TSD and Qbv are mutually exclusive in hardware */ 328 if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE) 329 return -EBUSY; 330 331 priv->tx_ring[tc]->tsd_enable = qopt->enable; 332 enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc), 333 qopt->enable ? ENETC_TSDE : 0); 334 335 return 0; 336 } 337 338 enum streamid_type { 339 STREAMID_TYPE_RESERVED = 0, 340 STREAMID_TYPE_NULL, 341 STREAMID_TYPE_SMAC, 342 }; 343 344 enum streamid_vlan_tagged { 345 STREAMID_VLAN_RESERVED = 0, 346 STREAMID_VLAN_TAGGED, 347 STREAMID_VLAN_UNTAGGED, 348 STREAMID_VLAN_ALL, 349 }; 350 351 #define ENETC_PSFP_WILDCARD -1 352 #define HANDLE_OFFSET 100 353 354 enum forward_type { 355 FILTER_ACTION_TYPE_PSFP = BIT(0), 356 FILTER_ACTION_TYPE_ACL = BIT(1), 357 FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0), 358 }; 359 360 /* This is for limit output type for input actions */ 361 struct actions_fwd { 362 u64 actions; 363 u64 keys; /* include the must needed keys */ 364 enum forward_type output; 365 }; 366 367 struct psfp_streamfilter_counters { 368 u64 matching_frames_count; 369 u64 passing_frames_count; 370 u64 not_passing_frames_count; 371 u64 passing_sdu_count; 372 u64 not_passing_sdu_count; 373 u64 red_frames_count; 374 }; 375 376 struct enetc_streamid { 377 u32 index; 378 union { 379 u8 src_mac[6]; 380 u8 dst_mac[6]; 381 }; 382 u8 filtertype; 383 u16 vid; 384 u8 tagged; 385 s32 handle; 386 }; 387 388 struct enetc_psfp_filter { 389 u32 index; 390 s32 handle; 391 s8 prio; 392 u32 gate_id; 393 s32 meter_id; 394 refcount_t refcount; 395 struct hlist_node node; 396 }; 397 398 struct enetc_psfp_gate { 399 u32 index; 400 s8 init_ipv; 401 u64 basetime; 402 u64 cycletime; 403 u64 cycletimext; 404 u32 num_entries; 405 refcount_t refcount; 406 struct hlist_node node; 407 struct action_gate_entry entries[0]; 408 }; 409 410 struct enetc_stream_filter { 411 struct enetc_streamid sid; 412 u32 sfi_index; 413 u32 sgi_index; 414 struct flow_stats stats; 415 struct hlist_node node; 416 }; 417 418 struct enetc_psfp { 419 unsigned long dev_bitmap; 420 unsigned long *psfp_sfi_bitmap; 421 struct hlist_head stream_list; 422 struct hlist_head psfp_filter_list; 423 struct hlist_head psfp_gate_list; 424 spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */ 425 }; 426 427 static struct actions_fwd enetc_act_fwd[] = { 428 { 429 BIT(FLOW_ACTION_GATE), 430 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS), 431 FILTER_ACTION_TYPE_PSFP 432 }, 433 /* example for ACL actions */ 434 { 435 BIT(FLOW_ACTION_DROP), 436 0, 437 FILTER_ACTION_TYPE_ACL 438 } 439 }; 440 441 static struct enetc_psfp epsfp = { 442 .psfp_sfi_bitmap = NULL, 443 }; 444 445 static LIST_HEAD(enetc_block_cb_list); 446 447 static inline int enetc_get_port(struct enetc_ndev_priv *priv) 448 { 449 return priv->si->pdev->devfn & 0x7; 450 } 451 452 /* Stream Identity Entry Set Descriptor */ 453 static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, 454 struct enetc_streamid *sid, 455 u8 enable) 456 { 457 struct enetc_cbd cbd = {.cmd = 0}; 458 struct streamid_data *si_data; 459 struct streamid_conf *si_conf; 460 u16 data_size; 461 dma_addr_t dma; 462 int err; 463 464 if (sid->index >= priv->psfp_cap.max_streamid) 465 return -EINVAL; 466 467 if (sid->filtertype != STREAMID_TYPE_NULL && 468 sid->filtertype != STREAMID_TYPE_SMAC) 469 return -EOPNOTSUPP; 470 471 /* Disable operation before enable */ 472 cbd.index = cpu_to_le16((u16)sid->index); 473 cbd.cls = BDCR_CMD_STREAM_IDENTIFY; 474 cbd.status_flags = 0; 475 476 data_size = sizeof(struct streamid_data); 477 si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); 478 cbd.length = cpu_to_le16(data_size); 479 480 dma = dma_map_single(&priv->si->pdev->dev, si_data, 481 data_size, DMA_FROM_DEVICE); 482 if (dma_mapping_error(&priv->si->pdev->dev, dma)) { 483 netdev_err(priv->si->ndev, "DMA mapping failed!\n"); 484 kfree(si_data); 485 return -ENOMEM; 486 } 487 488 cbd.addr[0] = lower_32_bits(dma); 489 cbd.addr[1] = upper_32_bits(dma); 490 memset(si_data->dmac, 0xff, ETH_ALEN); 491 si_data->vid_vidm_tg = 492 cpu_to_le16(ENETC_CBDR_SID_VID_MASK 493 + ((0x3 << 14) | ENETC_CBDR_SID_VIDM)); 494 495 si_conf = &cbd.sid_set; 496 /* Only one port supported for one entry, set itself */ 497 si_conf->iports = 1 << enetc_get_port(priv); 498 si_conf->id_type = 1; 499 si_conf->oui[2] = 0x0; 500 si_conf->oui[1] = 0x80; 501 si_conf->oui[0] = 0xC2; 502 503 err = enetc_send_cmd(priv->si, &cbd); 504 if (err) 505 return -EINVAL; 506 507 if (!enable) { 508 kfree(si_data); 509 return 0; 510 } 511 512 /* Enable the entry overwrite again incase space flushed by hardware */ 513 memset(&cbd, 0, sizeof(cbd)); 514 515 cbd.index = cpu_to_le16((u16)sid->index); 516 cbd.cmd = 0; 517 cbd.cls = BDCR_CMD_STREAM_IDENTIFY; 518 cbd.status_flags = 0; 519 520 si_conf->en = 0x80; 521 si_conf->stream_handle = cpu_to_le32(sid->handle); 522 si_conf->iports = 1 << enetc_get_port(priv); 523 si_conf->id_type = sid->filtertype; 524 si_conf->oui[2] = 0x0; 525 si_conf->oui[1] = 0x80; 526 si_conf->oui[0] = 0xC2; 527 528 memset(si_data, 0, data_size); 529 530 cbd.length = cpu_to_le16(data_size); 531 532 cbd.addr[0] = lower_32_bits(dma); 533 cbd.addr[1] = upper_32_bits(dma); 534 535 /* VIDM default to be 1. 536 * VID Match. If set (b1) then the VID must match, otherwise 537 * any VID is considered a match. VIDM setting is only used 538 * when TG is set to b01. 539 */ 540 if (si_conf->id_type == STREAMID_TYPE_NULL) { 541 ether_addr_copy(si_data->dmac, sid->dst_mac); 542 si_data->vid_vidm_tg = 543 cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) + 544 ((((u16)(sid->tagged) & 0x3) << 14) 545 | ENETC_CBDR_SID_VIDM)); 546 } else if (si_conf->id_type == STREAMID_TYPE_SMAC) { 547 ether_addr_copy(si_data->smac, sid->src_mac); 548 si_data->vid_vidm_tg = 549 cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) + 550 ((((u16)(sid->tagged) & 0x3) << 14) 551 | ENETC_CBDR_SID_VIDM)); 552 } 553 554 err = enetc_send_cmd(priv->si, &cbd); 555 kfree(si_data); 556 557 return err; 558 } 559 560 /* Stream Filter Instance Set Descriptor */ 561 static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv, 562 struct enetc_psfp_filter *sfi, 563 u8 enable) 564 { 565 struct enetc_cbd cbd = {.cmd = 0}; 566 struct sfi_conf *sfi_config; 567 568 cbd.index = cpu_to_le16(sfi->index); 569 cbd.cls = BDCR_CMD_STREAM_FILTER; 570 cbd.status_flags = 0x80; 571 cbd.length = cpu_to_le16(1); 572 573 sfi_config = &cbd.sfi_conf; 574 if (!enable) 575 goto exit; 576 577 sfi_config->en = 0x80; 578 579 if (sfi->handle >= 0) { 580 sfi_config->stream_handle = 581 cpu_to_le32(sfi->handle); 582 sfi_config->sthm |= 0x80; 583 } 584 585 sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id); 586 sfi_config->input_ports = 1 << enetc_get_port(priv); 587 588 /* The priority value which may be matched against the 589 * frame’s priority value to determine a match for this entry. 590 */ 591 if (sfi->prio >= 0) 592 sfi_config->multi |= (sfi->prio & 0x7) | 0x8; 593 594 /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX 595 * field as being either an MSDU value or an index into the Flow 596 * Meter Instance table. 597 * TODO: no limit max sdu 598 */ 599 600 if (sfi->meter_id >= 0) { 601 sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id); 602 sfi_config->multi |= 0x80; 603 } 604 605 exit: 606 return enetc_send_cmd(priv->si, &cbd); 607 } 608 609 static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv, 610 u32 index, 611 struct psfp_streamfilter_counters *cnt) 612 { 613 struct enetc_cbd cbd = { .cmd = 2 }; 614 struct sfi_counter_data *data_buf; 615 dma_addr_t dma; 616 u16 data_size; 617 int err; 618 619 cbd.index = cpu_to_le16((u16)index); 620 cbd.cmd = 2; 621 cbd.cls = BDCR_CMD_STREAM_FILTER; 622 cbd.status_flags = 0; 623 624 data_size = sizeof(struct sfi_counter_data); 625 data_buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); 626 if (!data_buf) 627 return -ENOMEM; 628 629 dma = dma_map_single(&priv->si->pdev->dev, data_buf, 630 data_size, DMA_FROM_DEVICE); 631 if (dma_mapping_error(&priv->si->pdev->dev, dma)) { 632 netdev_err(priv->si->ndev, "DMA mapping failed!\n"); 633 err = -ENOMEM; 634 goto exit; 635 } 636 cbd.addr[0] = lower_32_bits(dma); 637 cbd.addr[1] = upper_32_bits(dma); 638 639 cbd.length = cpu_to_le16(data_size); 640 641 err = enetc_send_cmd(priv->si, &cbd); 642 if (err) 643 goto exit; 644 645 cnt->matching_frames_count = 646 ((u64)le32_to_cpu(data_buf->matchh) << 32) 647 + data_buf->matchl; 648 649 cnt->not_passing_sdu_count = 650 ((u64)le32_to_cpu(data_buf->msdu_droph) << 32) 651 + data_buf->msdu_dropl; 652 653 cnt->passing_sdu_count = cnt->matching_frames_count 654 - cnt->not_passing_sdu_count; 655 656 cnt->not_passing_frames_count = 657 ((u64)le32_to_cpu(data_buf->stream_gate_droph) << 32) 658 + le32_to_cpu(data_buf->stream_gate_dropl); 659 660 cnt->passing_frames_count = cnt->matching_frames_count 661 - cnt->not_passing_sdu_count 662 - cnt->not_passing_frames_count; 663 664 cnt->red_frames_count = 665 ((u64)le32_to_cpu(data_buf->flow_meter_droph) << 32) 666 + le32_to_cpu(data_buf->flow_meter_dropl); 667 668 exit: 669 kfree(data_buf); 670 return err; 671 } 672 673 static u64 get_ptp_now(struct enetc_hw *hw) 674 { 675 u64 now_lo, now_hi, now; 676 677 now_lo = enetc_rd(hw, ENETC_SICTR0); 678 now_hi = enetc_rd(hw, ENETC_SICTR1); 679 now = now_lo | now_hi << 32; 680 681 return now; 682 } 683 684 static int get_start_ns(u64 now, u64 cycle, u64 *start) 685 { 686 u64 n; 687 688 if (!cycle) 689 return -EFAULT; 690 691 n = div64_u64(now, cycle); 692 693 *start = (n + 1) * cycle; 694 695 return 0; 696 } 697 698 /* Stream Gate Instance Set Descriptor */ 699 static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv, 700 struct enetc_psfp_gate *sgi, 701 u8 enable) 702 { 703 struct enetc_cbd cbd = { .cmd = 0 }; 704 struct sgi_table *sgi_config; 705 struct sgcl_conf *sgcl_config; 706 struct sgcl_data *sgcl_data; 707 struct sgce *sgce; 708 dma_addr_t dma; 709 u16 data_size; 710 int err, i; 711 u64 now; 712 713 cbd.index = cpu_to_le16(sgi->index); 714 cbd.cmd = 0; 715 cbd.cls = BDCR_CMD_STREAM_GCL; 716 cbd.status_flags = 0x80; 717 718 /* disable */ 719 if (!enable) 720 return enetc_send_cmd(priv->si, &cbd); 721 722 if (!sgi->num_entries) 723 return 0; 724 725 if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist || 726 !sgi->cycletime) 727 return -EINVAL; 728 729 /* enable */ 730 sgi_config = &cbd.sgi_table; 731 732 /* Keep open before gate list start */ 733 sgi_config->ocgtst = 0x80; 734 735 sgi_config->oipv = (sgi->init_ipv < 0) ? 736 0x0 : ((sgi->init_ipv & 0x7) | 0x8); 737 738 sgi_config->en = 0x80; 739 740 /* Basic config */ 741 err = enetc_send_cmd(priv->si, &cbd); 742 if (err) 743 return -EINVAL; 744 745 memset(&cbd, 0, sizeof(cbd)); 746 747 cbd.index = cpu_to_le16(sgi->index); 748 cbd.cmd = 1; 749 cbd.cls = BDCR_CMD_STREAM_GCL; 750 cbd.status_flags = 0; 751 752 sgcl_config = &cbd.sgcl_conf; 753 754 sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3; 755 756 data_size = struct_size(sgcl_data, sgcl, sgi->num_entries); 757 758 sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); 759 if (!sgcl_data) 760 return -ENOMEM; 761 762 cbd.length = cpu_to_le16(data_size); 763 764 dma = dma_map_single(&priv->si->pdev->dev, 765 sgcl_data, data_size, 766 DMA_FROM_DEVICE); 767 if (dma_mapping_error(&priv->si->pdev->dev, dma)) { 768 netdev_err(priv->si->ndev, "DMA mapping failed!\n"); 769 kfree(sgcl_data); 770 return -ENOMEM; 771 } 772 773 cbd.addr[0] = lower_32_bits(dma); 774 cbd.addr[1] = upper_32_bits(dma); 775 776 sgce = &sgcl_data->sgcl[0]; 777 778 sgcl_config->agtst = 0x80; 779 780 sgcl_data->ct = cpu_to_le32(sgi->cycletime); 781 sgcl_data->cte = cpu_to_le32(sgi->cycletimext); 782 783 if (sgi->init_ipv >= 0) 784 sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8; 785 786 for (i = 0; i < sgi->num_entries; i++) { 787 struct action_gate_entry *from = &sgi->entries[i]; 788 struct sgce *to = &sgce[i]; 789 790 if (from->gate_state) 791 to->multi |= 0x10; 792 793 if (from->ipv >= 0) 794 to->multi |= ((from->ipv & 0x7) << 5) | 0x08; 795 796 if (from->maxoctets >= 0) { 797 to->multi |= 0x01; 798 to->msdu[0] = from->maxoctets & 0xFF; 799 to->msdu[1] = (from->maxoctets >> 8) & 0xFF; 800 to->msdu[2] = (from->maxoctets >> 16) & 0xFF; 801 } 802 803 to->interval = cpu_to_le32(from->interval); 804 } 805 806 /* If basetime is less than now, calculate start time */ 807 now = get_ptp_now(&priv->si->hw); 808 809 if (sgi->basetime < now) { 810 u64 start; 811 812 err = get_start_ns(now, sgi->cycletime, &start); 813 if (err) 814 goto exit; 815 sgcl_data->btl = cpu_to_le32(lower_32_bits(start)); 816 sgcl_data->bth = cpu_to_le32(upper_32_bits(start)); 817 } else { 818 u32 hi, lo; 819 820 hi = upper_32_bits(sgi->basetime); 821 lo = lower_32_bits(sgi->basetime); 822 sgcl_data->bth = cpu_to_le32(hi); 823 sgcl_data->btl = cpu_to_le32(lo); 824 } 825 826 err = enetc_send_cmd(priv->si, &cbd); 827 828 exit: 829 kfree(sgcl_data); 830 831 return err; 832 } 833 834 static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index) 835 { 836 struct enetc_stream_filter *f; 837 838 hlist_for_each_entry(f, &epsfp.stream_list, node) 839 if (f->sid.index == index) 840 return f; 841 842 return NULL; 843 } 844 845 static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index) 846 { 847 struct enetc_psfp_gate *g; 848 849 hlist_for_each_entry(g, &epsfp.psfp_gate_list, node) 850 if (g->index == index) 851 return g; 852 853 return NULL; 854 } 855 856 static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index) 857 { 858 struct enetc_psfp_filter *s; 859 860 hlist_for_each_entry(s, &epsfp.psfp_filter_list, node) 861 if (s->index == index) 862 return s; 863 864 return NULL; 865 } 866 867 static struct enetc_psfp_filter 868 *enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi) 869 { 870 struct enetc_psfp_filter *s; 871 872 hlist_for_each_entry(s, &epsfp.psfp_filter_list, node) 873 if (s->gate_id == sfi->gate_id && 874 s->prio == sfi->prio && 875 s->meter_id == sfi->meter_id) 876 return s; 877 878 return NULL; 879 } 880 881 static int enetc_get_free_index(struct enetc_ndev_priv *priv) 882 { 883 u32 max_size = priv->psfp_cap.max_psfp_filter; 884 unsigned long index; 885 886 index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size); 887 if (index == max_size) 888 return -1; 889 890 return index; 891 } 892 893 static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index) 894 { 895 struct enetc_psfp_filter *sfi; 896 u8 z; 897 898 sfi = enetc_get_filter_by_index(index); 899 WARN_ON(!sfi); 900 z = refcount_dec_and_test(&sfi->refcount); 901 902 if (z) { 903 enetc_streamfilter_hw_set(priv, sfi, false); 904 hlist_del(&sfi->node); 905 kfree(sfi); 906 clear_bit(index, epsfp.psfp_sfi_bitmap); 907 } 908 } 909 910 static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index) 911 { 912 struct enetc_psfp_gate *sgi; 913 u8 z; 914 915 sgi = enetc_get_gate_by_index(index); 916 WARN_ON(!sgi); 917 z = refcount_dec_and_test(&sgi->refcount); 918 if (z) { 919 enetc_streamgate_hw_set(priv, sgi, false); 920 hlist_del(&sgi->node); 921 kfree(sgi); 922 } 923 } 924 925 static void remove_one_chain(struct enetc_ndev_priv *priv, 926 struct enetc_stream_filter *filter) 927 { 928 stream_gate_unref(priv, filter->sgi_index); 929 stream_filter_unref(priv, filter->sfi_index); 930 931 hlist_del(&filter->node); 932 kfree(filter); 933 } 934 935 static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv, 936 struct enetc_streamid *sid, 937 struct enetc_psfp_filter *sfi, 938 struct enetc_psfp_gate *sgi) 939 { 940 int err; 941 942 err = enetc_streamid_hw_set(priv, sid, true); 943 if (err) 944 return err; 945 946 if (sfi) { 947 err = enetc_streamfilter_hw_set(priv, sfi, true); 948 if (err) 949 goto revert_sid; 950 } 951 952 err = enetc_streamgate_hw_set(priv, sgi, true); 953 if (err) 954 goto revert_sfi; 955 956 return 0; 957 958 revert_sfi: 959 if (sfi) 960 enetc_streamfilter_hw_set(priv, sfi, false); 961 revert_sid: 962 enetc_streamid_hw_set(priv, sid, false); 963 return err; 964 } 965 966 static struct actions_fwd *enetc_check_flow_actions(u64 acts, 967 unsigned int inputkeys) 968 { 969 int i; 970 971 for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++) 972 if (acts == enetc_act_fwd[i].actions && 973 inputkeys & enetc_act_fwd[i].keys) 974 return &enetc_act_fwd[i]; 975 976 return NULL; 977 } 978 979 static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, 980 struct flow_cls_offload *f) 981 { 982 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 983 struct netlink_ext_ack *extack = f->common.extack; 984 struct enetc_stream_filter *filter, *old_filter; 985 struct enetc_psfp_filter *sfi, *old_sfi; 986 struct enetc_psfp_gate *sgi, *old_sgi; 987 struct flow_action_entry *entry; 988 struct action_gate_entry *e; 989 u8 sfi_overwrite = 0; 990 int entries_size; 991 int i, err; 992 993 if (f->common.chain_index >= priv->psfp_cap.max_streamid) { 994 NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!"); 995 return -ENOSPC; 996 } 997 998 flow_action_for_each(i, entry, &rule->action) 999 if (entry->id == FLOW_ACTION_GATE) 1000 break; 1001 1002 if (entry->id != FLOW_ACTION_GATE) 1003 return -EINVAL; 1004 1005 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 1006 if (!filter) 1007 return -ENOMEM; 1008 1009 filter->sid.index = f->common.chain_index; 1010 1011 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 1012 struct flow_match_eth_addrs match; 1013 1014 flow_rule_match_eth_addrs(rule, &match); 1015 1016 if (!is_zero_ether_addr(match.mask->dst) && 1017 !is_zero_ether_addr(match.mask->src)) { 1018 NL_SET_ERR_MSG_MOD(extack, 1019 "Cannot match on both source and destination MAC"); 1020 err = EINVAL; 1021 goto free_filter; 1022 } 1023 1024 if (!is_zero_ether_addr(match.mask->dst)) { 1025 if (!is_broadcast_ether_addr(match.mask->dst)) { 1026 NL_SET_ERR_MSG_MOD(extack, 1027 "Masked matching on destination MAC not supported"); 1028 err = EINVAL; 1029 goto free_filter; 1030 } 1031 ether_addr_copy(filter->sid.dst_mac, match.key->dst); 1032 filter->sid.filtertype = STREAMID_TYPE_NULL; 1033 } 1034 1035 if (!is_zero_ether_addr(match.mask->src)) { 1036 if (!is_broadcast_ether_addr(match.mask->src)) { 1037 NL_SET_ERR_MSG_MOD(extack, 1038 "Masked matching on source MAC not supported"); 1039 err = EINVAL; 1040 goto free_filter; 1041 } 1042 ether_addr_copy(filter->sid.src_mac, match.key->src); 1043 filter->sid.filtertype = STREAMID_TYPE_SMAC; 1044 } 1045 } else { 1046 NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS"); 1047 err = EINVAL; 1048 goto free_filter; 1049 } 1050 1051 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 1052 struct flow_match_vlan match; 1053 1054 flow_rule_match_vlan(rule, &match); 1055 if (match.mask->vlan_priority) { 1056 if (match.mask->vlan_priority != 1057 (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) { 1058 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); 1059 err = -EINVAL; 1060 goto free_filter; 1061 } 1062 } 1063 1064 if (match.mask->vlan_id) { 1065 if (match.mask->vlan_id != VLAN_VID_MASK) { 1066 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id"); 1067 err = -EINVAL; 1068 goto free_filter; 1069 } 1070 1071 filter->sid.vid = match.key->vlan_id; 1072 if (!filter->sid.vid) 1073 filter->sid.tagged = STREAMID_VLAN_UNTAGGED; 1074 else 1075 filter->sid.tagged = STREAMID_VLAN_TAGGED; 1076 } 1077 } else { 1078 filter->sid.tagged = STREAMID_VLAN_ALL; 1079 } 1080 1081 /* parsing gate action */ 1082 if (entry->gate.index >= priv->psfp_cap.max_psfp_gate) { 1083 NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!"); 1084 err = -ENOSPC; 1085 goto free_filter; 1086 } 1087 1088 if (entry->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) { 1089 NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!"); 1090 err = -ENOSPC; 1091 goto free_filter; 1092 } 1093 1094 entries_size = struct_size(sgi, entries, entry->gate.num_entries); 1095 sgi = kzalloc(entries_size, GFP_KERNEL); 1096 if (!sgi) { 1097 err = -ENOMEM; 1098 goto free_filter; 1099 } 1100 1101 refcount_set(&sgi->refcount, 1); 1102 sgi->index = entry->gate.index; 1103 sgi->init_ipv = entry->gate.prio; 1104 sgi->basetime = entry->gate.basetime; 1105 sgi->cycletime = entry->gate.cycletime; 1106 sgi->num_entries = entry->gate.num_entries; 1107 1108 e = sgi->entries; 1109 for (i = 0; i < entry->gate.num_entries; i++) { 1110 e[i].gate_state = entry->gate.entries[i].gate_state; 1111 e[i].interval = entry->gate.entries[i].interval; 1112 e[i].ipv = entry->gate.entries[i].ipv; 1113 e[i].maxoctets = entry->gate.entries[i].maxoctets; 1114 } 1115 1116 filter->sgi_index = sgi->index; 1117 1118 sfi = kzalloc(sizeof(*sfi), GFP_KERNEL); 1119 if (!sfi) { 1120 err = -ENOMEM; 1121 goto free_gate; 1122 } 1123 1124 refcount_set(&sfi->refcount, 1); 1125 sfi->gate_id = sgi->index; 1126 1127 /* flow meter not support yet */ 1128 sfi->meter_id = ENETC_PSFP_WILDCARD; 1129 1130 /* prio ref the filter prio */ 1131 if (f->common.prio && f->common.prio <= BIT(3)) 1132 sfi->prio = f->common.prio - 1; 1133 else 1134 sfi->prio = ENETC_PSFP_WILDCARD; 1135 1136 old_sfi = enetc_psfp_check_sfi(sfi); 1137 if (!old_sfi) { 1138 int index; 1139 1140 index = enetc_get_free_index(priv); 1141 if (sfi->handle < 0) { 1142 NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!"); 1143 err = -ENOSPC; 1144 goto free_sfi; 1145 } 1146 1147 sfi->index = index; 1148 sfi->handle = index + HANDLE_OFFSET; 1149 /* Update the stream filter handle also */ 1150 filter->sid.handle = sfi->handle; 1151 filter->sfi_index = sfi->index; 1152 sfi_overwrite = 0; 1153 } else { 1154 filter->sfi_index = old_sfi->index; 1155 filter->sid.handle = old_sfi->handle; 1156 sfi_overwrite = 1; 1157 } 1158 1159 err = enetc_psfp_hw_set(priv, &filter->sid, 1160 sfi_overwrite ? NULL : sfi, sgi); 1161 if (err) 1162 goto free_sfi; 1163 1164 spin_lock(&epsfp.psfp_lock); 1165 /* Remove the old node if exist and update with a new node */ 1166 old_sgi = enetc_get_gate_by_index(filter->sgi_index); 1167 if (old_sgi) { 1168 refcount_set(&sgi->refcount, 1169 refcount_read(&old_sgi->refcount) + 1); 1170 hlist_del(&old_sgi->node); 1171 kfree(old_sgi); 1172 } 1173 1174 hlist_add_head(&sgi->node, &epsfp.psfp_gate_list); 1175 1176 if (!old_sfi) { 1177 hlist_add_head(&sfi->node, &epsfp.psfp_filter_list); 1178 set_bit(sfi->index, epsfp.psfp_sfi_bitmap); 1179 } else { 1180 kfree(sfi); 1181 refcount_inc(&old_sfi->refcount); 1182 } 1183 1184 old_filter = enetc_get_stream_by_index(filter->sid.index); 1185 if (old_filter) 1186 remove_one_chain(priv, old_filter); 1187 1188 filter->stats.lastused = jiffies; 1189 hlist_add_head(&filter->node, &epsfp.stream_list); 1190 1191 spin_unlock(&epsfp.psfp_lock); 1192 1193 return 0; 1194 1195 free_sfi: 1196 kfree(sfi); 1197 free_gate: 1198 kfree(sgi); 1199 free_filter: 1200 kfree(filter); 1201 1202 return err; 1203 } 1204 1205 static int enetc_config_clsflower(struct enetc_ndev_priv *priv, 1206 struct flow_cls_offload *cls_flower) 1207 { 1208 struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower); 1209 struct netlink_ext_ack *extack = cls_flower->common.extack; 1210 struct flow_dissector *dissector = rule->match.dissector; 1211 struct flow_action *action = &rule->action; 1212 struct flow_action_entry *entry; 1213 struct actions_fwd *fwd; 1214 u64 actions = 0; 1215 int i, err; 1216 1217 if (!flow_action_has_entries(action)) { 1218 NL_SET_ERR_MSG_MOD(extack, "At least one action is needed"); 1219 return -EINVAL; 1220 } 1221 1222 flow_action_for_each(i, entry, action) 1223 actions |= BIT(entry->id); 1224 1225 fwd = enetc_check_flow_actions(actions, dissector->used_keys); 1226 if (!fwd) { 1227 NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!"); 1228 return -EOPNOTSUPP; 1229 } 1230 1231 if (fwd->output & FILTER_ACTION_TYPE_PSFP) { 1232 err = enetc_psfp_parse_clsflower(priv, cls_flower); 1233 if (err) { 1234 NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs"); 1235 return err; 1236 } 1237 } else { 1238 NL_SET_ERR_MSG_MOD(extack, "Unsupported actions"); 1239 return -EOPNOTSUPP; 1240 } 1241 1242 return 0; 1243 } 1244 1245 static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv, 1246 struct flow_cls_offload *f) 1247 { 1248 struct enetc_stream_filter *filter; 1249 struct netlink_ext_ack *extack = f->common.extack; 1250 int err; 1251 1252 if (f->common.chain_index >= priv->psfp_cap.max_streamid) { 1253 NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!"); 1254 return -ENOSPC; 1255 } 1256 1257 filter = enetc_get_stream_by_index(f->common.chain_index); 1258 if (!filter) 1259 return -EINVAL; 1260 1261 err = enetc_streamid_hw_set(priv, &filter->sid, false); 1262 if (err) 1263 return err; 1264 1265 remove_one_chain(priv, filter); 1266 1267 return 0; 1268 } 1269 1270 static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv, 1271 struct flow_cls_offload *f) 1272 { 1273 return enetc_psfp_destroy_clsflower(priv, f); 1274 } 1275 1276 static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv, 1277 struct flow_cls_offload *f) 1278 { 1279 struct psfp_streamfilter_counters counters = {}; 1280 struct enetc_stream_filter *filter; 1281 struct flow_stats stats = {}; 1282 int err; 1283 1284 filter = enetc_get_stream_by_index(f->common.chain_index); 1285 if (!filter) 1286 return -EINVAL; 1287 1288 err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters); 1289 if (err) 1290 return -EINVAL; 1291 1292 spin_lock(&epsfp.psfp_lock); 1293 stats.pkts = counters.matching_frames_count - filter->stats.pkts; 1294 stats.lastused = filter->stats.lastused; 1295 filter->stats.pkts += stats.pkts; 1296 spin_unlock(&epsfp.psfp_lock); 1297 1298 flow_stats_update(&f->stats, 0x0, stats.pkts, stats.lastused, 1299 FLOW_ACTION_HW_STATS_DELAYED); 1300 1301 return 0; 1302 } 1303 1304 static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv, 1305 struct flow_cls_offload *cls_flower) 1306 { 1307 switch (cls_flower->command) { 1308 case FLOW_CLS_REPLACE: 1309 return enetc_config_clsflower(priv, cls_flower); 1310 case FLOW_CLS_DESTROY: 1311 return enetc_destroy_clsflower(priv, cls_flower); 1312 case FLOW_CLS_STATS: 1313 return enetc_psfp_get_stats(priv, cls_flower); 1314 default: 1315 return -EOPNOTSUPP; 1316 } 1317 } 1318 1319 static inline void clean_psfp_sfi_bitmap(void) 1320 { 1321 bitmap_free(epsfp.psfp_sfi_bitmap); 1322 epsfp.psfp_sfi_bitmap = NULL; 1323 } 1324 1325 static void clean_stream_list(void) 1326 { 1327 struct enetc_stream_filter *s; 1328 struct hlist_node *tmp; 1329 1330 hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) { 1331 hlist_del(&s->node); 1332 kfree(s); 1333 } 1334 } 1335 1336 static void clean_sfi_list(void) 1337 { 1338 struct enetc_psfp_filter *sfi; 1339 struct hlist_node *tmp; 1340 1341 hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) { 1342 hlist_del(&sfi->node); 1343 kfree(sfi); 1344 } 1345 } 1346 1347 static void clean_sgi_list(void) 1348 { 1349 struct enetc_psfp_gate *sgi; 1350 struct hlist_node *tmp; 1351 1352 hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) { 1353 hlist_del(&sgi->node); 1354 kfree(sgi); 1355 } 1356 } 1357 1358 static void clean_psfp_all(void) 1359 { 1360 /* Disable all list nodes and free all memory */ 1361 clean_sfi_list(); 1362 clean_sgi_list(); 1363 clean_stream_list(); 1364 epsfp.dev_bitmap = 0; 1365 clean_psfp_sfi_bitmap(); 1366 } 1367 1368 int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 1369 void *cb_priv) 1370 { 1371 struct net_device *ndev = cb_priv; 1372 1373 if (!tc_can_offload(ndev)) 1374 return -EOPNOTSUPP; 1375 1376 switch (type) { 1377 case TC_SETUP_CLSFLOWER: 1378 return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data); 1379 default: 1380 return -EOPNOTSUPP; 1381 } 1382 } 1383 1384 int enetc_psfp_init(struct enetc_ndev_priv *priv) 1385 { 1386 if (epsfp.psfp_sfi_bitmap) 1387 return 0; 1388 1389 epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter, 1390 GFP_KERNEL); 1391 if (!epsfp.psfp_sfi_bitmap) 1392 return -ENOMEM; 1393 1394 spin_lock_init(&epsfp.psfp_lock); 1395 1396 if (list_empty(&enetc_block_cb_list)) 1397 epsfp.dev_bitmap = 0; 1398 1399 return 0; 1400 } 1401 1402 int enetc_psfp_clean(struct enetc_ndev_priv *priv) 1403 { 1404 if (!list_empty(&enetc_block_cb_list)) 1405 return -EBUSY; 1406 1407 clean_psfp_all(); 1408 1409 return 0; 1410 } 1411 1412 int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data) 1413 { 1414 struct enetc_ndev_priv *priv = netdev_priv(ndev); 1415 struct flow_block_offload *f = type_data; 1416 int err; 1417 1418 err = flow_block_cb_setup_simple(f, &enetc_block_cb_list, 1419 enetc_setup_tc_block_cb, 1420 ndev, ndev, true); 1421 if (err) 1422 return err; 1423 1424 switch (f->command) { 1425 case FLOW_BLOCK_BIND: 1426 set_bit(enetc_get_port(priv), &epsfp.dev_bitmap); 1427 break; 1428 case FLOW_BLOCK_UNBIND: 1429 clear_bit(enetc_get_port(priv), &epsfp.dev_bitmap); 1430 if (!epsfp.dev_bitmap) 1431 clean_psfp_all(); 1432 break; 1433 } 1434 1435 return 0; 1436 } 1437