/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2020 NXP
 */

#ifndef __DPAA2_ETH_H
#define __DPAA2_ETH_H

#include <linux/dcbnl.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/fsl/mc.h>
#include <linux/net_tstamp.h>

#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include "dpni.h"
#include "dpni-cmd.h"

#include "dpaa2-eth-trace.h"
#include "dpaa2-eth-debugfs.h"
#include "dpaa2-mac.h"

/* Pack a WRIOP major.minor.revision version triplet into a single
 * integer, suitable for numeric comparisons
 */
#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)

#define DPAA2_ETH_STORE_SIZE		16

/* Maximum number of scatter-gather entries in an ingress frame,
 * considering the maximum receive frame size is 64K
 */
#define DPAA2_ETH_MAX_SG_ENTRIES	((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)

/* Maximum acceptable MTU value. It is in direct relation with the hardware
 * enforced Max Frame Length (currently 10k).
 */
#define DPAA2_ETH_MFL			(10 * 1024)
#define DPAA2_ETH_MAX_MTU		(DPAA2_ETH_MFL - VLAN_ETH_HLEN)
/* Convert L3 MTU to L2 MFL */
#define DPAA2_ETH_L2_MAX_FRM(mtu)	((mtu) + VLAN_ETH_HLEN)

/* Set the taildrop threshold (in bytes) to allow the enqueue of a large
 * enough number of jumbo frames in the Rx queues (length of the current
 * frame is not taken into account when making the taildrop decision)
 */
#define DPAA2_ETH_FQ_TAILDROP_THRESH	(1024 * 1024)

/* Maximum burst size value for Tx shaping */
#define DPAA2_ETH_MAX_BURST_SIZE	0xF7FF

/* Maximum number of Tx confirmation frames to be processed
 * in a single NAPI call
 */
#define DPAA2_ETH_TXCONF_PER_NAPI	256

/* Buffer quota per channel. We want to keep in check number of ingress frames
 * in flight: for small sized frames, congestion group taildrop may kick in
 * first; for large sizes, Rx FQ taildrop threshold will ensure only a
 * reasonable number of frames will be pending at any given time.
 * Ingress frame drop due to buffer pool depletion should be a corner case only
 */
#define DPAA2_ETH_NUM_BUFS		1280
/* Refill the pool once the buffer count drops one full release command's
 * worth (DPAA2_ETH_BUFS_PER_CMD, defined below) under the quota
 */
#define DPAA2_ETH_REFILL_THRESH \
	(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)

/* Congestion group taildrop threshold: number of frames allowed to accumulate
 * at any moment in a group of Rx queues belonging to the same traffic class.
 * Choose value such that we don't risk depleting the buffer pool before the
 * taildrop kicks in
 */
#define DPAA2_ETH_CG_TAILDROP_THRESH(priv) \
	(1024 * dpaa2_eth_queue_count(priv) / dpaa2_eth_tc_count(priv))

/* Congestion group notification threshold: when this many frames accumulate
 * on the Rx queues belonging to the same TC, the MAC is instructed to send
 * PFC frames for that TC.
 * When number of pending frames drops below exit threshold transmission of
 * PFC frames is stopped.
 */
#define DPAA2_ETH_CN_THRESH_ENTRY(priv) \
	(DPAA2_ETH_CG_TAILDROP_THRESH(priv) / 2)
#define DPAA2_ETH_CN_THRESH_EXIT(priv) \
	(DPAA2_ETH_CN_THRESH_ENTRY(priv) * 3 / 4)

/* Maximum number of buffers that can be acquired/released through a single
 * QBMan command
 */
#define DPAA2_ETH_BUFS_PER_CMD		7

/* Hardware requires alignment for ingress/egress buffer addresses */
#define DPAA2_ETH_TX_BUF_ALIGN		64

/* Rx buffers are full pages; part of each page is reserved as tailroom for
 * the skb shared info, the remainder holds the frame data
 */
#define DPAA2_ETH_RX_BUF_RAW_SIZE	PAGE_SIZE
#define DPAA2_ETH_RX_BUF_TAILROOM \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
#define DPAA2_ETH_RX_BUF_SIZE \
	(DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM)

/* Hardware annotation area in RX/TX buffers */
#define DPAA2_ETH_RX_HWA_SIZE		64
#define DPAA2_ETH_TX_HWA_SIZE		128

/* PTP nominal frequency 1GHz */
#define DPAA2_PTP_CLK_PERIOD_NS		1

/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
 * to 256B. For newer revisions, the requirement is only for 64B alignment
 */
#define DPAA2_ETH_RX_BUF_ALIGN_REV1	256
#define DPAA2_ETH_RX_BUF_ALIGN		64

/* We are accommodating a skb backpointer and some S/G info
 * in the frame's software annotation. The hardware
 * options are either 0 or 64, so we choose the latter.
 */
#define DPAA2_ETH_SWA_SIZE		64

/* We store different information in the software annotation area of a Tx frame
 * based on what type of frame it is
 */
enum dpaa2_eth_swa_type {
	DPAA2_ETH_SWA_SINGLE,	/* linear skb */
	DPAA2_ETH_SWA_SG,	/* scatter-gather skb */
	DPAA2_ETH_SWA_XDP,	/* XDP frame */
};

/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
struct dpaa2_eth_swa {
	enum dpaa2_eth_swa_type type;
	union {
		struct {
			struct sk_buff *skb;
			int sgt_size;
		} single;
		struct {
			struct sk_buff *skb;
			struct scatterlist *scl;
			int num_sg;
			int sgt_size;
		} sg;
		struct {
			int dma_size;
			struct xdp_frame *xdpf;
		} xdp;
	};
};

/* Annotation valid bits in FD FRC */
#define DPAA2_FD_FRC_FASV		0x8000
#define DPAA2_FD_FRC_FAEADV		0x4000
#define DPAA2_FD_FRC_FAPRV		0x2000
#define DPAA2_FD_FRC_FAIADV		0x1000
#define DPAA2_FD_FRC_FASWOV		0x0800
#define DPAA2_FD_FRC_FAICFDV		0x0400

/* Error bits in FD CTRL */
#define DPAA2_FD_RX_ERR_MASK		(FD_CTRL_SBE | FD_CTRL_FAERR)
#define DPAA2_FD_TX_ERR_MASK		(FD_CTRL_UFD	| \
					 FD_CTRL_SBE	| \
					 FD_CTRL_FSE	| \
					 FD_CTRL_FAERR)

/* Annotation bits in FD CTRL */
#define DPAA2_FD_CTRL_ASAL		0x00020000	/* ASAL = 128B */

/* Frame annotation status */
struct dpaa2_fas {
	u8 reserved;
	u8 ppid;
	__le16 ifpid;
	__le32 status;
};

/* Frame annotation status word is located in the first 8 bytes
 * of the buffer's hardware annotation area
 */
#define DPAA2_FAS_OFFSET		0
#define DPAA2_FAS_SIZE			(sizeof(struct dpaa2_fas))

/* Timestamp is located in the next 8 bytes of the buffer's
 * hardware annotation area
 */
#define DPAA2_TS_OFFSET			0x8

/* Frame annotation egress action descriptor */
#define DPAA2_FAEAD_OFFSET		0x58

struct dpaa2_faead {
	__le32 conf_fqid;
	__le32 ctrl;
};

#define DPAA2_FAEAD_A2V			0x20000000
#define DPAA2_FAEAD_A4V			0x08000000
#define DPAA2_FAEAD_UPDV		0x00001000
#define DPAA2_FAEAD_EBDDV		0x00002000
#define DPAA2_FAEAD_UPD			0x00000010

/* Hardware timestamp layout: 48-bit seconds plus 32-bit nanoseconds */
struct ptp_tstamp {
	u16 sec_msb;
	u32 sec_lsb;
	u32 nsec;
};

/* Split a nanosecond count into the hardware sec_msb:sec_lsb:nsec format.
 * do_div() divides 'sec' in place and returns the remainder (the ns part).
 */
static inline void ns_to_ptp_tstamp(struct ptp_tstamp *tstamp, u64 ns)
{
	u64 sec, nsec;

	sec = ns;
	nsec = do_div(sec, 1000000000);

	tstamp->sec_lsb = sec & 0xFFFFFFFF;
	tstamp->sec_msb = (sec >> 32) & 0xFFFF;
	tstamp->nsec = nsec;
}

/* Accessors for the hardware annotation fields that we use */

/* The hardware annotation area starts right after the software annotation
 * area, when the latter is present
 */
static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
{
	return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0);
}

static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
{
	return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
}

static inline __le64 *dpaa2_get_ts(void *buf_addr, bool swa)
{
	return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
}

static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
{
	return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
}

/* Error and status bits in the frame annotation status word */
/* Debug frame, otherwise supposed to be discarded */
#define DPAA2_FAS_DISC			0x80000000
/* MACSEC frame */
#define DPAA2_FAS_MS			0x40000000
#define DPAA2_FAS_PTP			0x08000000
/* Ethernet multicast frame */
#define DPAA2_FAS_MC			0x04000000
/* Ethernet broadcast frame */
#define DPAA2_FAS_BC			0x02000000
#define DPAA2_FAS_KSE			0x00040000
#define DPAA2_FAS_EOFHE			0x00020000
#define DPAA2_FAS_MNLE			0x00010000
#define DPAA2_FAS_TIDE			0x00008000
#define DPAA2_FAS_PIEE			0x00004000
/* Frame length error */
#define DPAA2_FAS_FLE			0x00002000
/* Frame physical error */
#define DPAA2_FAS_FPE			0x00001000
#define DPAA2_FAS_PTE			0x00000080
#define DPAA2_FAS_ISP			0x00000040
#define DPAA2_FAS_PHE			0x00000020
#define DPAA2_FAS_BLE			0x00000010
/* L3 csum validation performed */
#define DPAA2_FAS_L3CV			0x00000008
/* L3 csum error */
#define DPAA2_FAS_L3CE			0x00000004
/* L4 csum validation performed */
#define DPAA2_FAS_L4CV			0x00000002
/* L4 csum error */
#define DPAA2_FAS_L4CE			0x00000001
/* Possible errors on the ingress path */
#define DPAA2_FAS_RX_ERR_MASK		(DPAA2_FAS_KSE		| \
					 DPAA2_FAS_EOFHE	| \
					 DPAA2_FAS_MNLE		| \
					 DPAA2_FAS_TIDE		| \
					 DPAA2_FAS_PIEE		| \
					 DPAA2_FAS_FLE		| \
					 DPAA2_FAS_FPE		| \
					 DPAA2_FAS_PTE		| \
					 DPAA2_FAS_ISP		| \
					 DPAA2_FAS_PHE		| \
					 DPAA2_FAS_BLE		| \
					 DPAA2_FAS_L3CE		| \
					 DPAA2_FAS_L4CE)

/* Time in milliseconds between link state updates */
#define DPAA2_ETH_LINK_STATE_REFRESH	1000

/* Number of times to retry a frame enqueue before giving up.
 * Value determined empirically, in order to minimize the number
 * of frames dropped on Tx
 */
#define DPAA2_ETH_ENQUEUE_RETRIES	10

/* Number of times to retry DPIO portal operations while waiting
 * for portal to finish executing current command and become
 * available. We want to avoid being stuck in a while loop in case
 * hardware becomes unresponsive, but not give up too easily if
 * the portal really is busy for valid reasons
 */
#define DPAA2_ETH_SWP_BUSY_RETRIES	1000

/* Driver statistics, other than those in struct rtnl_link_stats64.
 * These are usually collected per-CPU and aggregated by ethtool.
 */
struct dpaa2_eth_drv_stats {
	__u64 tx_conf_frames;
	__u64 tx_conf_bytes;
	__u64 tx_sg_frames;
	__u64 tx_sg_bytes;
	__u64 rx_sg_frames;
	__u64 rx_sg_bytes;
	/* Linear skbs sent as a S/G FD due to insufficient headroom */
	__u64 tx_converted_sg_frames;
	__u64 tx_converted_sg_bytes;
	/* Enqueues retried due to portal busy */
	__u64 tx_portal_busy;
};

/* Per-FQ statistics */
struct dpaa2_eth_fq_stats {
	/* Number of frames received on this queue */
	__u64 frames;
};

/* Per-channel statistics */
struct dpaa2_eth_ch_stats {
	/* Volatile dequeues retried due to portal busy */
	__u64 dequeue_portal_busy;
	/* Pull errors */
	__u64 pull_err;
	/* Number of CDANs; useful to estimate avg NAPI len */
	__u64 cdan;
	/* XDP counters */
	__u64 xdp_drop;
	__u64 xdp_tx;
	__u64 xdp_tx_err;
	__u64 xdp_redirect;
	/* Must be last, does not show up in ethtool stats */
	__u64 frames;
};

/* Maximum number of queues associated with a DPNI */
#define DPAA2_ETH_MAX_TCS		8
#define DPAA2_ETH_MAX_RX_QUEUES_PER_TC	16
#define DPAA2_ETH_MAX_RX_QUEUES	\
	(DPAA2_ETH_MAX_RX_QUEUES_PER_TC * DPAA2_ETH_MAX_TCS)
#define DPAA2_ETH_MAX_TX_QUEUES	16
#define DPAA2_ETH_MAX_QUEUES		(DPAA2_ETH_MAX_RX_QUEUES + \
					 DPAA2_ETH_MAX_TX_QUEUES)
#define DPAA2_ETH_MAX_NETDEV_QUEUES	\
	(DPAA2_ETH_MAX_TX_QUEUES * DPAA2_ETH_MAX_TCS)

#define DPAA2_ETH_MAX_DPCONS		16

enum dpaa2_eth_fq_type {
	DPAA2_RX_FQ = 0,
	DPAA2_TX_CONF_FQ,
};

struct dpaa2_eth_priv;

/* Batch of frame descriptors, sized for XDP bulk redirect/transmit */
struct dpaa2_eth_xdp_fds {
	struct dpaa2_fd fds[DEV_MAP_BULK_SIZE];
	ssize_t num;
};

/* Software state of one frame queue (Rx or Tx confirmation) */
struct dpaa2_eth_fq {
	u32 fqid;
	u32 tx_qdbin;
	u32 tx_fqid[DPAA2_ETH_MAX_TCS];
	u16 flowid;
	u8 tc;
	int target_cpu;
	u32 dq_frames;
	u32 dq_bytes;
	struct dpaa2_eth_channel *channel;
	enum dpaa2_eth_fq_type type;

	/* Handler invoked for each frame dequeued from this queue */
	void (*consume)(struct dpaa2_eth_priv *priv,
			struct dpaa2_eth_channel *ch,
			const struct dpaa2_fd *fd,
			struct dpaa2_eth_fq *fq);
	struct dpaa2_eth_fq_stats stats;

	struct dpaa2_eth_xdp_fds xdp_redirect_fds;
	struct dpaa2_eth_xdp_fds xdp_tx_fds;
};

/* Per-channel XDP state: attached program and buffers pending release */
struct dpaa2_eth_ch_xdp {
	struct bpf_prog *prog;
	u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD];
	int drop_cnt;
	unsigned int res;
};

struct dpaa2_eth_channel {
	struct dpaa2_io_notification_ctx nctx;
	struct fsl_mc_device *dpcon;
	int dpcon_id;
	int ch_id;
	struct napi_struct napi;
	struct dpaa2_io *dpio;
	struct dpaa2_io_store *store;
	struct dpaa2_eth_priv *priv;
	int buf_count;
	struct dpaa2_eth_ch_stats stats;
	struct dpaa2_eth_ch_xdp xdp;
	struct xdp_rxq_info xdp_rxq;
	struct list_head *rx_list;
};

/* Mapping between an ethtool RXNFC field and its hardware classification
 * key extract
 */
struct dpaa2_eth_dist_fields {
	u64 rxnfc_field;
	enum net_prot cls_prot;
	int cls_field;
	int size;
	u64 id;
};

struct dpaa2_eth_cls_rule {
	struct ethtool_rx_flow_spec fs;
	u8 in_use;
};

/* Per-CPU cache of recycled scatter-gather table buffers */
#define DPAA2_ETH_SGT_CACHE_SIZE	256
struct dpaa2_eth_sgt_cache {
	void *buf[DPAA2_ETH_SGT_CACHE_SIZE];
	u16 count;
};

/* Driver private data */
struct dpaa2_eth_priv {
	struct net_device *net_dev;

	u8 num_fqs;
	struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
	int (*enqueue)(struct dpaa2_eth_priv *priv,
		       struct dpaa2_eth_fq *fq,
		       struct dpaa2_fd *fd, u8 prio,
		       u32 num_frames,
		       int *frames_enqueued);

	u8 num_channels;
	struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
	struct dpaa2_eth_sgt_cache __percpu *sgt_cache;

	struct dpni_attr dpni_attrs;
	u16 dpni_ver_major;
	u16 dpni_ver_minor;
	u16 tx_data_offset;

	struct fsl_mc_device *dpbp_dev;
	u16 rx_buf_size;
	u16 bpid;
	struct iommu_domain *iommu_domain;

	enum hwtstamp_tx_types tx_tstamp_type;	/* Tx timestamping type */
	bool rx_tstamp;				/* Rx timestamping enabled */

	u16 tx_qdid;
	struct fsl_mc_io *mc_io;
	/* Cores which have an affine DPIO/DPCON.
	 * This is the cpu set on which Rx and Tx conf frames are processed
	 */
	struct cpumask dpio_cpumask;

	/* Standard statistics */
	struct rtnl_link_stats64 __percpu *percpu_stats;
	/* Extra stats, in addition to the ones known by the kernel */
	struct dpaa2_eth_drv_stats __percpu *percpu_extras;

	u16 mc_token;
	u8 rx_fqtd_enabled;
	u8 rx_cgtd_enabled;

	struct dpni_link_state link_state;
	bool do_link_poll;
	struct task_struct *poll_thread;

	/* enabled ethtool hashing bits */
	u64 rx_hash_fields;
	u64 rx_cls_fields;
	struct dpaa2_eth_cls_rule *cls_rules;
	u8 rx_cls_enabled;
	u8 vlan_cls_enabled;
	u8 pfc_enabled;
#ifdef CONFIG_FSL_DPAA2_ETH_DCB
	u8 dcbx_mode;
	struct ieee_pfc pfc;
#endif
	struct bpf_prog *xdp_prog;
#ifdef CONFIG_DEBUG_FS
	struct dpaa2_debugfs dbg;
#endif

	struct dpaa2_mac *mac;
	struct workqueue_struct *dpaa2_ptp_wq;
	struct work_struct tx_onestep_tstamp;
	struct sk_buff_head tx_skbs;
	/* The one-step timestamping configuration on hardware
	 * registers could only be done when no one-step
	 * timestamping frames are in flight. So we use a mutex
	 * lock here to make sure the lock is released by last
	 * one-step timestamping packet through TX confirmation
	 * queue before transmit current packet.
	 */
	struct mutex onestep_tstamp_lock;
};

/* Flags stored in skb->cb[0] for Tx timestamping requests */
#define TX_TSTAMP		0x1
#define TX_TSTAMP_ONESTEP_SYNC	0x2

#define DPAA2_RXH_SUPPORTED	(RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
				| RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
				| RXH_L4_B_2_3)

/* default Rx hash options, set during probing */
#define DPAA2_RXH_DEFAULT	(RXH_L3_PROTO | RXH_IP_SRC | RXH_IP_DST | \
				 RXH_L4_B_0_1 | RXH_L4_B_2_3)

#define dpaa2_eth_hash_enabled(priv)	\
	((priv)->dpni_attrs.num_queues > 1)

/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
#define DPAA2_CLASSIFIER_DMA_SIZE 256

extern const struct ethtool_ops dpaa2_ethtool_ops;
extern int dpaa2_phc_index;
extern struct ptp_qoriq *dpaa2_ptp;

/* Compare the running DPNI firmware version against (ver_major, ver_minor).
 * Returns a negative value when the firmware is older, zero when equal,
 * a positive value when newer.
 */
static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
					 u16 ver_major, u16 ver_minor)
{
	if (priv->dpni_ver_major == ver_major)
		return priv->dpni_ver_minor - ver_minor;
	return priv->dpni_ver_major - ver_major;
}

/* Minimum firmware version that supports a more flexible API
 * for configuring the Rx flow hash key
 */
#define DPNI_RX_DIST_KEY_VER_MAJOR	7
#define DPNI_RX_DIST_KEY_VER_MINOR	5

#define dpaa2_eth_has_legacy_dist(priv)					\
	(dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR,	\
				DPNI_RX_DIST_KEY_VER_MINOR) < 0)

#define dpaa2_eth_fs_enabled(priv)	\
	(!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))

#define dpaa2_eth_fs_mask_enabled(priv)	\
	((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)

#define dpaa2_eth_fs_count(priv)	\
	((priv)->dpni_attrs.fs_entries)

#define dpaa2_eth_tc_count(priv)	\
	((priv)->dpni_attrs.num_tcs)

/* We have exactly one {Rx, Tx conf} queue per channel */
#define dpaa2_eth_queue_count(priv)	\
	((priv)->num_channels)

enum dpaa2_eth_rx_dist {
	DPAA2_ETH_RX_DIST_HASH,
	DPAA2_ETH_RX_DIST_CLS
};

/* Unique IDs for the supported Rx classification header fields */
#define DPAA2_ETH_DIST_ETHDST		BIT(0)
#define DPAA2_ETH_DIST_ETHSRC		BIT(1)
#define DPAA2_ETH_DIST_ETHTYPE		BIT(2)
#define DPAA2_ETH_DIST_VLAN		BIT(3)
#define DPAA2_ETH_DIST_IPSRC		BIT(4)
#define DPAA2_ETH_DIST_IPDST		BIT(5)
#define DPAA2_ETH_DIST_IPPROTO		BIT(6)
#define DPAA2_ETH_DIST_L4SRC		BIT(7)
#define DPAA2_ETH_DIST_L4DST		BIT(8)
#define DPAA2_ETH_DIST_ALL		(~0ULL)

/* Minimum firmware version that supports pause frame configuration */
#define DPNI_PAUSE_VER_MAJOR		7
#define DPNI_PAUSE_VER_MINOR		13
#define dpaa2_eth_has_pause_support(priv)			\
	(dpaa2_eth_cmp_dpni_ver((priv), DPNI_PAUSE_VER_MAJOR,	\
				DPNI_PAUSE_VER_MINOR) >= 0)

/* PAUSE alone means symmetric (Rx + Tx) pause; PAUSE + ASYM_PAUSE means
 * Rx-only; ASYM_PAUSE alone means Tx-only - hence the XOR below
 */
static inline bool dpaa2_eth_tx_pause_enabled(u64 link_options)
{
	return !!(link_options & DPNI_LINK_OPT_PAUSE) ^
	       !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
}

static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
{
	return !!(link_options & DPNI_LINK_OPT_PAUSE);
}

/* Amount of headroom we need to reserve in front of the frame data when
 * transmitting the given skb (NULL skb means an XDP buffer)
 */
static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
{
	unsigned int headroom = DPAA2_ETH_SWA_SIZE;

	/* If we don't have an skb (e.g. XDP buffer), we only need space for
	 * the software annotation area
	 */
	if (!skb)
		return headroom;

	/* For non-linear skbs we have no headroom requirement, as we build a
	 * SG frame with a newly allocated SGT buffer
	 */
	if (skb_is_nonlinear(skb))
		return 0;

	/* If we have Tx timestamping, need 128B hardware annotation */
	if (skb->cb[0])
		headroom += DPAA2_ETH_TX_HWA_SIZE;

	return headroom;
}

/* Extra headroom space requested to hardware, in order to make sure there's
 * no realloc'ing in forwarding scenarios
 */
static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
{
	return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
}

int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
int dpaa2_eth_cls_key_size(u64 key);
int dpaa2_eth_cls_fld_off(int prot, int field);
void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);

void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
			       bool tx_pause, bool pfc);

extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops;

#endif	/* __DPAA2_ETH_H */