/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QEDE_H_
#define _QEDE_H_
#include <linux/compiler.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/qed/qede_rdma.h>
#include <linux/io.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/qed/common_hsi.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_eth_if.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>

#define QEDE_MAJOR_VERSION		8
#define QEDE_MINOR_VERSION		37
#define QEDE_REVISION_VERSION		0
#define QEDE_ENGINEERING_VERSION	20
#define DRV_MODULE_VERSION	__stringify(QEDE_MAJOR_VERSION) "."	\
				__stringify(QEDE_MINOR_VERSION) "."	\
				__stringify(QEDE_REVISION_VERSION) "."	\
				__stringify(QEDE_ENGINEERING_VERSION)

#define DRV_MODULE_SYM	qede

struct qede_stats_common {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 gft_filter_drop;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 coalesced_pkts;
	u64 coalesced_events;
	u64 coalesced_aborts_num;
	u64 non_coalesced_pkts;
	u64 coalesced_bytes;
	u64 link_change_count;
	u64 ptp_skip_txts;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 brb_truncates;
	u64 brb_discards;
	u64 tx_mac_ctrl_frames;
};

struct qede_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};

struct qede_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

struct qede_stats {
	struct qede_stats_common common;

	union {
		struct qede_stats_bb bb;
		struct qede_stats_ah ah;
	};
};

struct qede_vlan {
	struct list_head list;
	u16 vid;
	bool configured;
};
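
/* Illustrative sketch only (assumed usage, not driver API): entries of
 * struct qede_vlan hang off the vlan_list of struct qede_dev (defined
 * below) and are walked with the standard <linux/list.h> helpers:
 *
 *	struct qede_vlan *vlan;
 *
 *	list_for_each_entry(vlan, &edev->vlan_list, list)
 *		if (vlan->vid == vid)
 *			return vlan->configured;
 */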

struct qede_rdma_dev {
	struct qedr_dev *qedr_dev;
	struct list_head entry;
	struct list_head rdma_event_list;
	struct workqueue_struct *rdma_wq;
	struct kref refcnt;
	struct completion event_comp;
	bool exp_recovery;
};

struct qede_ptp;

#define QEDE_RFS_MAX_FLTR	256

enum qede_flags_bit {
	QEDE_FLAGS_IS_VF = 0,
	QEDE_FLAGS_LINK_REQUESTED,
	QEDE_FLAGS_PTP_TX_IN_PRORGESS,
	QEDE_FLAGS_TX_TIMESTAMPING_EN
};

#define QEDE_DUMP_MAX_ARGS	4
enum qede_dump_cmd {
	QEDE_DUMP_CMD_NONE = 0,
	QEDE_DUMP_CMD_NVM_CFG,
	QEDE_DUMP_CMD_GRCDUMP,
	QEDE_DUMP_CMD_MAX
};

struct qede_dump_info {
	enum qede_dump_cmd cmd;
	u8 num_args;
	u32 args[QEDE_DUMP_MAX_ARGS];
};

struct qede_dev {
	struct qed_dev *cdev;
	struct net_device *ndev;
	struct pci_dev *pdev;

	u32 dp_module;
	u8 dp_level;

	unsigned long flags;
#define IS_VF(edev)	test_bit(QEDE_FLAGS_IS_VF, \
				 &(edev)->flags)

	const struct qed_eth_ops *ops;
	struct qede_ptp *ptp;
	u64 ptp_skip_txts;

	struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev)	((edev)->dev_info.num_queues)
#define QEDE_MAX_TSS_CNT(edev)	((edev)->dev_info.num_queues)
#define QEDE_IS_BB(edev) \
	((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
#define QEDE_IS_AH(edev) \
	((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)

	struct qede_fastpath *fp_array;
	u8 req_num_tx;
	u8 fp_num_tx;
	u8 req_num_rx;
	u8 fp_num_rx;
	u16 req_queues;
	u16 num_queues;
	u16 total_xdp_queues;

#define QEDE_QUEUE_CNT(edev)	((edev)->num_queues)
#define QEDE_RSS_COUNT(edev)	((edev)->num_queues - (edev)->fp_num_tx)
#define QEDE_RX_QUEUE_IDX(edev, i)	(i)
#define QEDE_TSS_COUNT(edev)	((edev)->num_queues - (edev)->fp_num_rx)

	struct qed_int_info int_info;

	/* Smaller private variant of the RTNL lock */
	struct mutex qede_lock;
	u32 state;		/* Protected by qede_lock */
	u16 rx_buf_size;
	u32 rx_copybreak;

	/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVERHEAD	(ETH_HLEN + 8 + 8)
	/* Max supported alignment is 256 (8 shift)
	 * minimal alignment shift 6 is optimal for 57xxx HW performance
	 */
#define QEDE_RX_ALIGN_SHIFT	max(6, min(8, L1_CACHE_SHIFT))
	/* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
	 * at the end of skb->data, to avoid wasting a full cache line.
	 * This reduces memory use (skb->truesize).
	 */
#define QEDE_FW_RX_ALIGN_END					\
	max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT,			\
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
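
	/* Worked example (illustrative, config dependent): on x86_64 with
	 * 64-byte cache lines, L1_CACHE_SHIFT is 6, so QEDE_RX_ALIGN_SHIFT
	 * is max(6, min(8, 6)) = 6 and the first operand is 1 << 6 = 64.
	 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) is roughly 320
	 * bytes on such a configuration, so QEDE_FW_RX_ALIGN_END resolves
	 * to the shared-info reservation rather than the bare alignment.
	 */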

	struct qede_stats stats;

	/* Bitfield to track initialized RSS params */
	u32 rss_params_inited;
#define QEDE_RSS_INDIR_INITED	BIT(0)
#define QEDE_RSS_KEY_INITED	BIT(1)
#define QEDE_RSS_CAPS_INITED	BIT(2)

	u16 rss_ind_table[128];
	u32 rss_key[10];
	u8 rss_caps;

	/* Both must be a power of two */
	u16 q_num_rx_buffers;
	u16 q_num_tx_buffers;

	bool gro_disable;

	struct list_head vlan_list;
	u16 configured_vlans;
	u16 non_configured_vlans;
	bool accept_any_vlan;

	struct delayed_work sp_task;
	unsigned long sp_flags;
	u16 vxlan_dst_port;
	u16 geneve_dst_port;

	struct qede_arfs *arfs;
	bool wol_enabled;

	struct qede_rdma_dev rdma_info;

	struct bpf_prog *xdp_prog;

	unsigned long err_flags;
#define QEDE_ERR_IS_HANDLED	31
#define QEDE_ERR_ATTN_CLR_EN	0
#define QEDE_ERR_GET_DBG_INFO	1
#define QEDE_ERR_IS_RECOVERABLE	2
#define QEDE_ERR_WARN		3

	struct qede_dump_info dump_info;
};

enum QEDE_STATE {
	QEDE_STATE_CLOSED,
	QEDE_STATE_OPEN,
	QEDE_STATE_RECOVERY,
};

#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))

#define MAX_NUM_TC	8
#define MAX_NUM_PRI	8

/* The driver supports the new build_skb() API:
 * the RX ring buffer contains a pointer to the data only,
 * skbs are built only after the frame was DMA-ed.
 */
struct sw_rx_data {
	struct page *data;
	dma_addr_t mapping;
	unsigned int page_offset;
};
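
/* Illustrative sketch (assumed flow, not the driver's exact fastpath code)
 * of how an sw_rx_data buffer becomes an skb once the frame was DMA-ed;
 * "rxq", "idx", "pad" and "len" stand in for state the real code carries:
 *
 *	struct sw_rx_data *bd = &rxq->sw_rx_ring[idx];
 *	u8 *va = page_address(bd->data) + bd->page_offset;
 *	struct sk_buff *skb = build_skb(va, rxq->rx_buf_seg_size);
 *
 *	if (skb) {
 *		skb_reserve(skb, pad);
 *		skb_put(skb, len);
 *	}
 */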

enum qede_agg_state {
	QEDE_AGG_STATE_NONE  = 0,
	QEDE_AGG_STATE_START = 1,
	QEDE_AGG_STATE_ERROR = 2
};

struct qede_agg_info {
	/* rx_buf is a data buffer that can be placed / consumed from rx bd
	 * chain. It has two purposes: We will preallocate the data buffer
	 * for each aggregation when we open the interface and will place this
	 * buffer on the rx-bd-ring when we receive TPA_START. We don't want
	 * to be in a state where allocation fails, as we can't reuse the
	 * consumer buffer in the rx-chain since FW may still be writing to it
	 * (since header needs to be modified for TPA).
	 * The second purpose is to keep a pointer to the bd buffer during
	 * aggregation.
	 */
	struct sw_rx_data buffer;
	struct sk_buff *skb;

	/* We need some structs from the start cookie until termination */
	u16 vlan_tag;

	bool tpa_start_fail;
	u8 state;
	u8 frag_id;

	u8 tunnel_type;
};

struct qede_rx_queue {
	__le16 *hw_cons_ptr;
	void __iomem *hw_rxq_prod_addr;

	/* Required for the allocation of replacement buffers */
	struct device *dev;

	struct bpf_prog *xdp_prog;

	u16 sw_rx_cons;
	u16 sw_rx_prod;

	u16 filled_buffers;
	u8 data_direction;
	u8 rxq_id;

	/* Used once per each NAPI run */
	u16 num_rx_buffers;

	u16 rx_headroom;

	u32 rx_buf_size;
	u32 rx_buf_seg_size;

	struct sw_rx_data *sw_rx_ring;
	struct qed_chain rx_bd_ring;
	struct qed_chain rx_comp_ring ____cacheline_aligned;

	/* GRO */
	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];

	/* Used once per each NAPI run */
	u64 rcv_pkts;

	u64 rx_hw_errors;
	u64 rx_alloc_errors;
	u64 rx_ip_frags;

	u64 xdp_no_pass;

	void *handle;
	struct xdp_rxq_info xdp_rxq;
};

union db_prod {
	struct eth_db_data data;
	u32 raw;
};

struct sw_tx_bd {
	struct sk_buff *skb;
	u8 flags;
/* Set on the first BD descriptor when there is a split BD */
#define QEDE_TSO_SPLIT_BD	BIT(0)
};

struct sw_tx_xdp {
	struct page *page;
	struct xdp_frame *xdpf;
	dma_addr_t mapping;
};

struct qede_tx_queue {
	u8 is_xdp;
	bool is_legacy;
	u16 sw_tx_cons;
	u16 sw_tx_prod;
	u16 num_tx_buffers;	/* Slowpath only */

	u64 xmit_pkts;
	u64 stopped_cnt;
	u64 tx_mem_alloc_err;

	__le16 *hw_cons_ptr;

	/* Needed for the mapping of packets */
	struct device *dev;

	void __iomem *doorbell_addr;
	union db_prod tx_db;

	/* Spinlock for XDP queues in case of XDP_REDIRECT */
	spinlock_t xdp_tx_lock;

	int index;		/* Slowpath only */
#define QEDE_TXQ_XDP_TO_IDX(edev, txq)	((txq)->index - \
					 QEDE_MAX_TSS_CNT(edev))
#define QEDE_TXQ_IDX_TO_XDP(edev, idx)	((idx) + QEDE_MAX_TSS_CNT(edev))
#define QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)	((edev)->fp_num_rx + \
						 ((idx) % QEDE_TSS_COUNT(edev)))
#define QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)	((idx) / QEDE_TSS_COUNT(edev))
#define QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq)	((QEDE_TSS_COUNT(edev) * \
						 (txq)->cos) + (txq)->index)
#define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx)	\
	(&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \
	[QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)]))
#define QEDE_FP_TC0_TXQ(fp)	(&((fp)->txq[0]))
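
	/* Worked example for the mapping macros above (illustrative
	 * numbers): with QEDE_TSS_COUNT(edev) == 8, fp_num_rx == 0 and
	 * ndev txq id 13, QEDE_NDEV_TXQ_ID_TO_FP_ID yields 0 + (13 % 8) = 5
	 * and QEDE_NDEV_TXQ_ID_TO_TXQ_COS yields 13 / 8 = 1, i.e. fastpath 5,
	 * traffic class 1.  The reverse mapping agrees:
	 * QEDE_TXQ_TO_NDEV_TXQ_ID gives (8 * 1) + 5 = 13.
	 */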

	/* Regular Tx requires skb + metadata for release purposes,
	 * while XDP requires the pages and the mapped address.
	 */
	union {
		struct sw_tx_bd *skbs;
		struct sw_tx_xdp *xdp;
	} sw_tx_ring;

	struct qed_chain tx_pbl;

	/* Slowpath; Should be kept in end [unless missing padding] */
	void *handle;
	u16 cos;
	u16 ndev_txq_id;
};

#define BD_UNMAP_ADDR(bd)	HILO_U64(le32_to_cpu((bd)->addr.hi), \
					 le32_to_cpu((bd)->addr.lo))
#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len)				\
	do {								\
		(bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr));	\
		(bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr));	\
		(bd)->nbytes = cpu_to_le16(len);			\
	} while (0)
#define BD_UNMAP_LEN(bd)	(le16_to_cpu((bd)->nbytes))
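
/* Illustrative sketch (assumed flow, not the driver's exact code) of the
 * BD helpers above pairing a DMA map with its later unmap; "first_bd"
 * stands in for the packet's first TX buffer descriptor:
 *
 *	dma_addr_t mapping = dma_map_single(txq->dev, skb->data,
 *					    skb_headlen(skb), DMA_TO_DEVICE);
 *
 *	if (!dma_mapping_error(txq->dev, mapping))
 *		BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
 *
 * and on TX completion:
 *
 *	dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
 *			 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
 */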

struct qede_fastpath {
	struct qede_dev *edev;

	u8 type;
#define QEDE_FASTPATH_TX	BIT(0)
#define QEDE_FASTPATH_RX	BIT(1)
#define QEDE_FASTPATH_XDP	BIT(2)
#define QEDE_FASTPATH_COMBINED	(QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)

	u8 id;

	u8 xdp_xmit;
#define QEDE_XDP_TX		BIT(0)
#define QEDE_XDP_REDIRECT	BIT(1)

	struct napi_struct napi;
	struct qed_sb_info *sb_info;
	struct qede_rx_queue *rxq;
	struct qede_tx_queue *txq;
	struct qede_tx_queue *xdp_tx;

	char name[IFNAMSIZ + 8];
};

/* Debug print definitions */
#define DP_NAME(edev)	netdev_name((edev)->ndev)

#define XMIT_PLAIN		0
#define XMIT_L4_CSUM		BIT(0)
#define XMIT_LSO		BIT(1)
#define XMIT_ENC		BIT(2)
#define XMIT_ENC_GSO_L4_CSUM	BIT(3)

#define QEDE_CSUM_ERROR			BIT(0)
#define QEDE_CSUM_UNNECESSARY		BIT(1)
#define QEDE_TUNN_CSUM_UNNECESSARY	BIT(2)

#define QEDE_SP_RECOVERY		0
#define QEDE_SP_RX_MODE			1
#define QEDE_SP_RSVD1			2
#define QEDE_SP_RSVD2			3
#define QEDE_SP_HW_ERR			4
#define QEDE_SP_ARFS_CONFIG		5
#define QEDE_SP_AER			7

#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id);
#define QEDE_SP_TASK_POLL_DELAY	(5 * HZ)
#endif

void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr);
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev);
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
void qede_free_arfs(struct qede_dev *edev);
int qede_alloc_arfs(struct qede_dev *edev);
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie);
int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd);
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
			  u32 *rule_locs);
int qede_get_arfs_filter_count(struct qede_dev *edev);

struct qede_reload_args {
	void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
	union {
		netdev_features_t features;
		struct bpf_prog *new_prog;
		u16 mtu;
	} u;
};

/* Datapath functions definition */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int qede_xdp_transmit(struct net_device *dev, int n_frames,
		      struct xdp_frame **frames, u32 flags);
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
		      struct net_device *sb_dev);
netdev_features_t qede_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features);
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
int qede_free_tx_pkt(struct qede_dev *edev,
		     struct qede_tx_queue *txq, int *len);
int qede_poll(struct napi_struct *napi, int budget);
irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie);

/* Filtering function definitions */
void qede_force_mac(void *dev, u8 *mac, bool forced);
void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port);
int qede_set_mac_addr(struct net_device *ndev, void *p);

int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
int qede_configure_vlan_filters(struct qede_dev *edev);

netdev_features_t qede_fix_features(struct net_device *dev,
				    netdev_features_t features);
int qede_set_features(struct net_device *dev, netdev_features_t features);
void qede_set_rx_mode(struct net_device *ndev);
void qede_config_rx_mode(struct net_device *ndev);
void qede_fill_rss_params(struct qede_dev *edev,
			  struct qed_update_vport_rss_params *rss, u8 *update);

void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti);
void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti);

int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp);

#ifdef CONFIG_DCB
void qede_set_dcbnl_ops(struct net_device *ndev);
#endif

void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_set_udp_tunnels(struct qede_dev *edev);
void qede_reload(struct qede_dev *edev,
		 struct qede_reload_args *args, bool is_locked);
int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev);
void __qede_lock(struct qede_dev *edev);
void __qede_unlock(struct qede_dev *edev);
bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
			    struct flow_cls_offload *f);

void qede_forced_speed_maps_init(void);

#define RX_RING_SIZE_POW	13
#define RX_RING_SIZE		((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX		(RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN		128
#define NUM_RX_BDS_KDUMP_MIN	63
#define NUM_RX_BDS_DEF		((u16)BIT(10) - 1)

#define TX_RING_SIZE_POW	13
#define TX_RING_SIZE		((u16)BIT(TX_RING_SIZE_POW))
#define NUM_TX_BDS_MAX		(TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN		128
#define NUM_TX_BDS_KDUMP_MIN	63
#define NUM_TX_BDS_DEF		NUM_TX_BDS_MAX

#define QEDE_MIN_PKT_LEN		64
#define QEDE_RX_HDR_SIZE		256
#define QEDE_MAX_JUMBO_PACKET_SIZE	9600
#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
#define for_each_cos_in_txq(edev, var) \
	for ((var) = 0; (var) < (edev)->dev_info.num_tc; (var)++)

#endif /* _QEDE_H_ */