// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#ifndef __HCLGE_MAIN_H
#define __HCLGE_MAIN_H
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>

#include "hclge_cmd.h"
#include "hnae3.h"

#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"

/* Sentinel vport id used where no valid vport is available */
#define HCLGE_INVALID_VPORT 0xffff

/* Vector index at which RoCE vectors start (per name; confirm vs. users) */
#define HCLGE_ROCE_VECTOR_OFFSET 96

/* Number of command descriptors needed to read the whole PF config block */
#define HCLGE_PF_CFG_BLOCK_SIZE 32
#define HCLGE_PF_CFG_DESC_NUM \
	(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)

/* Base addresses of the interrupt vector register space */
#define HCLGE_VECTOR_REG_BASE 0x20000
#define HCLGE_MISC_VECTOR_REG_BASE 0x20400

/* Stride between consecutive vector registers, and per-VF offset */
#define HCLGE_VECTOR_REG_OFFSET 0x4
#define HCLGE_VECTOR_VF_OFFSET 0x100000

/* RSS: indirection table size, hash key size and hash algorithm selection */
#define HCLGE_RSS_IND_TBL_SIZE 512
#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE 40
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
/* Number of commands needed to program the whole indirection table */
#define HCLGE_RSS_CFG_TBL_NUM \
	(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)

/* RSS input tuple field-select bits */
#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
#define HCLGE_D_PORT_BIT BIT(0)	/* destination port */
#define HCLGE_S_PORT_BIT BIT(1)	/* source port */
#define HCLGE_D_IP_BIT BIT(2)	/* destination IP */
#define HCLGE_S_IP_BIT BIT(3)	/* source IP */
#define HCLGE_V_TAG_BIT BIT(4)

/* Per-TC RSS queue-set sizes; hardware requires powers of two */
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
#define HCLGE_RSS_TC_SIZE_2 4
#define HCLGE_RSS_TC_SIZE_3 8
#define HCLGE_RSS_TC_SIZE_4 16
#define HCLGE_RSS_TC_SIZE_5 32
#define HCLGE_RSS_TC_SIZE_6 64
#define HCLGE_RSS_TC_SIZE_7 128

/* Entries in the multicast table (MTA) bitmap; see hclge_vport::mta_shadow */
#define HCLGE_MTA_TBL_SIZE 4096

/* Retry budget when waiting for a TQP reset (per name; confirm vs. users) */
#define HCLGE_TQP_RESET_TRY_TIMES 10

/* PHY page numbers: MDI-X control and copper registers live on page 0 */
#define HCLGE_PHY_PAGE_MDIX 0
#define HCLGE_PHY_PAGE_COPPER 0

/* Page Selection Reg.
 */
#define HCLGE_PHY_PAGE_REG 22

/* Copper Specific Control Register */
#define HCLGE_PHY_CSC_REG 16

/* Copper Specific Status Register */
#define HCLGE_PHY_CSS_REG 17

/* MDI/MDI-X crossover control field (shift + mask) in the CSC register */
#define HCLGE_PHY_MDIX_CTRL_S 5
#define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5)

/* Status bits in the CSS register */
#define HCLGE_PHY_MDIX_STATUS_B 6
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11

/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
#define HCLGE_VF_NUM_PER_BYTE 8

/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_MISC_VECTOR_INT_STS 0x20800
#define HCLGE_GLOBAL_RESET_REG 0x20A00
#define HCLGE_GLOBAL_RESET_BIT 0
#define HCLGE_CORE_RESET_BIT 1
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0

/* Vector0 register bits define */
#define HCLGE_VECTOR0_GLOBALRESET_INT_B 5
#define HCLGE_VECTOR0_CORERESET_INT_B 6
#define HCLGE_VECTOR0_IMPRESET_INT_B 7

/* Vector0 interrupt CMDQ event source register(RW) */
#define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1

/* Default/min/max layer-2 frame sizes in bytes */
#define HCLGE_MAC_DEFAULT_FRAME \
	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME 64
#define HCLGE_MAC_MAX_FRAME 9728

/* Link-speed capability bits (see hclge_cfg::speed_ability) */
#define HCLGE_SUPPORT_1G_BIT BIT(0)
#define HCLGE_SUPPORT_10G_BIT BIT(1)
#define HCLGE_SUPPORT_25G_BIT BIT(2)
#define HCLGE_SUPPORT_50G_BIT BIT(3)
#define HCLGE_SUPPORT_100G_BIT BIT(4)

/* Bit numbers for hclge_dev::state, tracking driver/device lifecycle and
 * which service tasks are scheduled or in flight.
 */
enum HCLGE_DEV_STATE {
	HCLGE_STATE_REINITING,
	HCLGE_STATE_DOWN,
	HCLGE_STATE_DISABLED,
	HCLGE_STATE_REMOVING,
	HCLGE_STATE_SERVICE_INITED,
	HCLGE_STATE_SERVICE_SCHED,
	HCLGE_STATE_RST_SERVICE_SCHED,
	HCLGE_STATE_RST_HANDLING,
	HCLGE_STATE_MBX_SERVICE_SCHED,
	HCLGE_STATE_MBX_HANDLING,
	HCLGE_STATE_STATISTICS_UPDATING,
	HCLGE_STATE_CMD_DISABLE,
	HCLGE_STATE_MAX
};

/* Classification of a misc (vector 0) interrupt event */
enum hclge_evt_cause {
	HCLGE_VECTOR0_EVENT_RST,
	HCLGE_VECTOR0_EVENT_MBX,
	HCLGE_VECTOR0_EVENT_OTHER,
};

/* NOTE(review): "ENBALE" looks like a typo for "ENABLE"; kept as-is because
 * renaming a public macro would break its users.
 */
#define HCLGE_MPF_ENBALE 1

/* MAC link speeds, encoded directly as the rate in Mbps */
enum HCLGE_MAC_SPEED {
	HCLGE_MAC_SPEED_10M	= 10,		/* 10 Mbps */
	HCLGE_MAC_SPEED_100M	= 100,		/* 100 Mbps */
	HCLGE_MAC_SPEED_1G	= 1000,		/* 1000 Mbps   = 1 Gbps */
	HCLGE_MAC_SPEED_10G	= 10000,	/* 10000 Mbps  = 10 Gbps */
	HCLGE_MAC_SPEED_25G	= 25000,	/* 25000 Mbps  = 25 Gbps */
	HCLGE_MAC_SPEED_40G	= 40000,	/* 40000 Mbps  = 40 Gbps */
	HCLGE_MAC_SPEED_50G	= 50000,	/* 50000 Mbps  = 50 Gbps */
	HCLGE_MAC_SPEED_100G	= 100000	/* 100000 Mbps = 100 Gbps */
};

enum HCLGE_MAC_DUPLEX {
	HCLGE_MAC_HALF,
	HCLGE_MAC_FULL
};

/* Selects which 12-bit slice of the destination MAC address indexes the
 * multicast table (bit ranges per the enumerator names).
 */
enum hclge_mta_dmac_sel_type {
	HCLGE_MAC_ADDR_47_36,
	HCLGE_MAC_ADDR_46_35,
	HCLGE_MAC_ADDR_45_34,
	HCLGE_MAC_ADDR_44_33,
};

/* MAC-level configuration and link state for the PF's port */
struct hclge_mac {
	u8 phy_addr;
	u8 flag;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 autoneg;
	u8 duplex;
	u32 speed;
	int link;	/* store the link status of mac & phy (if phy exists) */
	struct phy_device *phydev;
	struct mii_bus *mdio_bus;
	phy_interface_t phy_if;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
};

/* Hardware access context: mapped BAR, MAC, vectors and command queue */
struct hclge_hw {
	void __iomem *io_base;
	struct hclge_mac mac;
	int num_vec;
	struct hclge_cmq cmq;
};

/* TQP stats
 * NOTE(review): "hlcge" is a typo for "hclge"; kept, renaming would break
 * users of this struct.
 */
struct hlcge_tqp_stats {
	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};

/* One task queue pair (Tx/Rx queue) owned by this PF */
struct hclge_tqp {
	/* copy of device pointer from pci_dev,
	 * used when perform DMA mapping
	 */
	struct device *dev;
	struct hnae3_queue q;
	struct hlcge_tqp_stats tqp_stats;
	u16 index;	/* Global index in a NIC controller */

	bool alloced;	/* whether this TQP has been handed to a vport */
};

/* Flow-control (pause) operating modes */
enum hclge_fc_mode {
	HCLGE_FC_NONE,
	HCLGE_FC_RX_PAUSE,
	HCLGE_FC_TX_PAUSE,
	HCLGE_FC_FULL,
	HCLGE_FC_PFC,
	HCLGE_FC_DEFAULT
};

#define HCLGE_PG_NUM		4
#define HCLGE_SCH_MODE_SP	0
#define HCLGE_SCH_MODE_DWRR	1
/* Scheduling configuration for one priority group (PG) */
struct hclge_pg_info {
	u8 pg_id;
	u8 pg_sch_mode;		/* 0: sp; 1: dwrr */
	u8 tc_bit_map;		/* TCs that belong to this PG */
	u32 bw_limit;
	u8 tc_dwrr[HNAE3_MAX_TC];
};

/* Scheduling configuration for one traffic class (TC) */
struct hclge_tc_info {
	u8 tc_id;
	u8 tc_sch_mode;		/* 0: sp; 1: dwrr */
	u8 pgid;		/* parent priority group id */
	u32 bw_limit;
};

/* Static configuration read from firmware/NCL at probe time */
struct hclge_cfg {
	u8 vmdq_vport_num;
	u8 tc_num;
	u16 tqp_desc_num;
	u16 rx_buf_len;
	u16 rss_size_max;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 default_speed;
	u32 numa_node_map;
	u8 speed_ability;	/* HCLGE_SUPPORT_*G_BIT capability bits */
};

/* Traffic-management (scheduler / PFC) state for the PF */
struct hclge_tm_info {
	u8 num_tc;
	u8 num_pg;	/* It must be 1 if vNET-Base schd */
	u8 pg_dwrr[HCLGE_PG_NUM];
	u8 prio_tc[HNAE3_MAX_USER_PRIO];	/* user priority -> TC map */
	struct hclge_pg_info pg_info[HCLGE_PG_NUM];
	struct hclge_tc_info tc_info[HNAE3_MAX_TC];
	enum hclge_fc_mode fc_mode;
	u8 hw_pfc_map;	/* Allow for packet drop or not on this TC */
};

/* Pairs an ethtool stat string with its offset into a stats struct */
struct hclge_comm_stats_str {
	char desc[ETH_GSTRING_LEN];
	unsigned long offset;
};

/* all 64bit stats, opcode id: 0x0030.
 * Field order mirrors the firmware reply layout - do not reorder.
 */
struct hclge_64_bit_stats {
	/* query_igu_stat */
	u64 igu_rx_oversize_pkt;
	u64 igu_rx_undersize_pkt;
	u64 igu_rx_out_all_pkt;
	u64 igu_rx_uni_pkt;
	u64 igu_rx_multi_pkt;
	u64 igu_rx_broad_pkt;
	u64 rsv0;

	/* query_egu_stat */
	u64 egu_tx_out_all_pkt;
	u64 egu_tx_uni_pkt;
	u64 egu_tx_multi_pkt;
	u64 egu_tx_broad_pkt;

	/* ssu_ppp packet stats */
	u64 ssu_ppp_mac_key_num;
	u64 ssu_ppp_host_key_num;
	u64 ppp_ssu_mac_rlt_num;
	u64 ppp_ssu_host_rlt_num;

	/* ssu_tx_in_out_dfx_stats */
	u64 ssu_tx_in_num;
	u64 ssu_tx_out_num;
	/* ssu_rx_in_out_dfx_stats */
	u64 ssu_rx_in_num;
	u64 ssu_rx_out_num;
};

/* all 32bit stats, opcode id:
 * 0x0031. Counters are 32-bit in hardware but accumulated into u64 here.
 * Field order mirrors the firmware reply layout - do not reorder.
 */
struct hclge_32_bit_stats {
	u64 igu_rx_err_pkt;
	u64 igu_rx_no_eof_pkt;
	u64 igu_rx_no_sof_pkt;
	u64 egu_tx_1588_pkt;
	u64 egu_tx_err_pkt;
	u64 ssu_full_drop_num;
	u64 ssu_part_drop_num;
	u64 ppp_key_drop_num;
	u64 ppp_rlt_drop_num;
	u64 ssu_key_drop_num;
	u64 pkt_curr_buf_cnt;
	u64 qcn_fb_rcv_cnt;
	u64 qcn_fb_drop_cnt;
	/* NOTE(review): "invaild" is a typo for "invalid"; kept, renaming
	 * would break users of this field.
	 */
	u64 qcn_fb_invaild_cnt;
	u64 rsv0;

	/* Rx packet level statistics, per traffic class */
	u64 rx_packet_tc0_in_cnt;
	u64 rx_packet_tc1_in_cnt;
	u64 rx_packet_tc2_in_cnt;
	u64 rx_packet_tc3_in_cnt;
	u64 rx_packet_tc4_in_cnt;
	u64 rx_packet_tc5_in_cnt;
	u64 rx_packet_tc6_in_cnt;
	u64 rx_packet_tc7_in_cnt;
	u64 rx_packet_tc0_out_cnt;
	u64 rx_packet_tc1_out_cnt;
	u64 rx_packet_tc2_out_cnt;
	u64 rx_packet_tc3_out_cnt;
	u64 rx_packet_tc4_out_cnt;
	u64 rx_packet_tc5_out_cnt;
	u64 rx_packet_tc6_out_cnt;
	u64 rx_packet_tc7_out_cnt;

	/* Tx packet level statistics */
	u64 tx_packet_tc0_in_cnt;
	u64 tx_packet_tc1_in_cnt;
	u64 tx_packet_tc2_in_cnt;
	u64 tx_packet_tc3_in_cnt;
	u64 tx_packet_tc4_in_cnt;
	u64 tx_packet_tc5_in_cnt;
	u64 tx_packet_tc6_in_cnt;
	u64 tx_packet_tc7_in_cnt;
	u64 tx_packet_tc0_out_cnt;
	u64 tx_packet_tc1_out_cnt;
	u64 tx_packet_tc2_out_cnt;
	u64 tx_packet_tc3_out_cnt;
	u64 tx_packet_tc4_out_cnt;
	u64 tx_packet_tc5_out_cnt;
	u64 tx_packet_tc6_out_cnt;
	u64 tx_packet_tc7_out_cnt;

	/* packet buffer statistics */
	u64 pkt_curr_buf_tc0_cnt;
	u64 pkt_curr_buf_tc1_cnt;
	u64 pkt_curr_buf_tc2_cnt;
	u64 pkt_curr_buf_tc3_cnt;
	u64 pkt_curr_buf_tc4_cnt;
	u64 pkt_curr_buf_tc5_cnt;
	u64 pkt_curr_buf_tc6_cnt;
	u64 pkt_curr_buf_tc7_cnt;

	u64 mb_uncopy_num;
	u64 lo_pri_unicast_rlt_drop_num;
	u64 hi_pri_multicast_rlt_drop_num;
	u64 lo_pri_multicast_rlt_drop_num;
	u64 rx_oq_drop_pkt_cnt;
	u64 tx_oq_drop_pkt_cnt;
	u64 nic_l2_err_drop_pkt_cnt;
	u64 roc_l2_err_drop_pkt_cnt;
};

/* mac
 * stats, opcode id: 0x0032.
 * Field order mirrors the firmware reply layout - do not reorder.
 */
struct hclge_mac_stats {
	u64 mac_tx_mac_pause_num;
	u64 mac_rx_mac_pause_num;
	/* Per-priority PFC pause frame counters */
	u64 mac_tx_pfc_pri0_pkt_num;
	u64 mac_tx_pfc_pri1_pkt_num;
	u64 mac_tx_pfc_pri2_pkt_num;
	u64 mac_tx_pfc_pri3_pkt_num;
	u64 mac_tx_pfc_pri4_pkt_num;
	u64 mac_tx_pfc_pri5_pkt_num;
	u64 mac_tx_pfc_pri6_pkt_num;
	u64 mac_tx_pfc_pri7_pkt_num;
	u64 mac_rx_pfc_pri0_pkt_num;
	u64 mac_rx_pfc_pri1_pkt_num;
	u64 mac_rx_pfc_pri2_pkt_num;
	u64 mac_rx_pfc_pri3_pkt_num;
	u64 mac_rx_pfc_pri4_pkt_num;
	u64 mac_rx_pfc_pri5_pkt_num;
	u64 mac_rx_pfc_pri6_pkt_num;
	u64 mac_rx_pfc_pri7_pkt_num;
	/* Tx totals and size-bucket histogram */
	u64 mac_tx_total_pkt_num;
	u64 mac_tx_total_oct_num;
	u64 mac_tx_good_pkt_num;
	u64 mac_tx_bad_pkt_num;
	u64 mac_tx_good_oct_num;
	u64 mac_tx_bad_oct_num;
	u64 mac_tx_uni_pkt_num;
	u64 mac_tx_multi_pkt_num;
	u64 mac_tx_broad_pkt_num;
	u64 mac_tx_undersize_pkt_num;
	u64 mac_tx_oversize_pkt_num;
	u64 mac_tx_64_oct_pkt_num;
	u64 mac_tx_65_127_oct_pkt_num;
	u64 mac_tx_128_255_oct_pkt_num;
	u64 mac_tx_256_511_oct_pkt_num;
	u64 mac_tx_512_1023_oct_pkt_num;
	u64 mac_tx_1024_1518_oct_pkt_num;
	u64 mac_tx_1519_2047_oct_pkt_num;
	u64 mac_tx_2048_4095_oct_pkt_num;
	u64 mac_tx_4096_8191_oct_pkt_num;
	u64 rsv0;
	u64 mac_tx_8192_9216_oct_pkt_num;
	u64 mac_tx_9217_12287_oct_pkt_num;
	u64 mac_tx_12288_16383_oct_pkt_num;
	u64 mac_tx_1519_max_good_oct_pkt_num;
	u64 mac_tx_1519_max_bad_oct_pkt_num;

	/* Rx totals and size-bucket histogram (mirror of the Tx set) */
	u64 mac_rx_total_pkt_num;
	u64 mac_rx_total_oct_num;
	u64 mac_rx_good_pkt_num;
	u64 mac_rx_bad_pkt_num;
	u64 mac_rx_good_oct_num;
	u64 mac_rx_bad_oct_num;
	u64 mac_rx_uni_pkt_num;
	u64 mac_rx_multi_pkt_num;
	u64 mac_rx_broad_pkt_num;
	u64 mac_rx_undersize_pkt_num;
	u64 mac_rx_oversize_pkt_num;
	u64 mac_rx_64_oct_pkt_num;
	u64 mac_rx_65_127_oct_pkt_num;
	u64 mac_rx_128_255_oct_pkt_num;
	u64 mac_rx_256_511_oct_pkt_num;
	u64 mac_rx_512_1023_oct_pkt_num;
	u64 mac_rx_1024_1518_oct_pkt_num;
	u64 mac_rx_1519_2047_oct_pkt_num;
	u64 mac_rx_2048_4095_oct_pkt_num;
	u64 mac_rx_4096_8191_oct_pkt_num;
	u64 rsv1;
	u64 mac_rx_8192_9216_oct_pkt_num;
	u64 mac_rx_9217_12287_oct_pkt_num;
	u64 mac_rx_12288_16383_oct_pkt_num;
	u64 mac_rx_1519_max_good_oct_pkt_num;
	u64 mac_rx_1519_max_bad_oct_pkt_num;

	/* Error/exception frame counters */
	u64 mac_tx_fragment_pkt_num;
	u64 mac_tx_undermin_pkt_num;
	u64 mac_tx_jabber_pkt_num;
	u64 mac_tx_err_all_pkt_num;
	u64 mac_tx_from_app_good_pkt_num;
	u64 mac_tx_from_app_bad_pkt_num;
	u64 mac_rx_fragment_pkt_num;
	u64 mac_rx_undermin_pkt_num;
	u64 mac_rx_jabber_pkt_num;
	u64 mac_rx_fcs_err_pkt_num;
	u64 mac_rx_send_app_good_pkt_num;
	u64 mac_rx_send_app_bad_pkt_num;
};

/* Stats refresh interval, in service-task ticks (60 * 5) */
#define HCLGE_STATS_TIMER_INTERVAL	(60 * 5)
/* Aggregated hardware statistics plus the refresh countdown */
struct hclge_hw_stats {
	struct hclge_mac_stats mac_stats;
	struct hclge_64_bit_stats all_64_bit_stats;
	struct hclge_32_bit_stats all_32_bit_stats;
	u32 stats_timer;
};

/* Ethertype/TPID configuration for VLAN handling, per direction:
 * rx outer(ot)/inner(in), first/second tag, and tx outer/inner.
 */
struct hclge_vlan_type_cfg {
	u16 rx_ot_fst_vlan_type;
	u16 rx_ot_sec_vlan_type;
	u16 rx_in_fst_vlan_type;
	u16 rx_in_sec_vlan_type;
	u16 tx_ot_vlan_type;
	u16 tx_in_vlan_type;
};

#define HCLGE_VPORT_NUM 256
/* Per-PF driver state: one instance per PCI function */
struct hclge_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclge_hw hw;
	struct hclge_misc_vector misc_vector;
	struct hclge_hw_stats hw_stats;
	unsigned long state;		/* HCLGE_STATE_* bits */

	enum hnae3_reset_type reset_type;
	unsigned long reset_request;	/* reset has been requested */
	unsigned long reset_pending;	/* client rst is pending to be served */
	u32 fw_version;
	u16 num_vmdq_vport;		/* Num vmdq vport this PF has set up */
	u16 num_tqps;			/* Num task queue pairs of this PF */
	u16 num_req_vfs;		/* Num VFs requested for this PF */

	u16 base_tqp_pid;	/* Base task tqp physical id of this PF */
	u16 alloc_rss_size;	/* Allocated RSS task queue */
	u16 rss_size_max;	/* HW defined max RSS task queue */

	u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
	u16 num_alloc_vport;		/* Num vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_desc;
	u8 hw_tc_map;
	u8 tc_num_last_time;
	enum hclge_fc_mode fc_mode_last_time;

#define HCLGE_FLAG_TC_BASE_SCH_MODE		1
#define HCLGE_FLAG_VNET_BASE_SCH_MODE		2
	u8 tx_sch_mode;
	u8 tc_max;
	u8 pfc_max;

	u8 default_up;
	u8 dcbx_cap;
	struct hclge_tm_info tm_info;

	/* MSI(-X) vector accounting; RoCE vectors are carved out separately */
	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u32 base_msi_vector;
	u16 *vector_status;
	int *vector_irq;
	u16 num_roce_msi;	/* Num of roce vectors for this PF */
	int roce_base_vector;

	u16 pending_udp_bitmap;

	u16 rx_itr_default;
	u16 tx_itr_default;

	u16 adminq_work_limit; /* Num of admin receive queue desc to process */
	unsigned long service_timer_period;
	unsigned long service_timer_previous;
	struct timer_list service_timer;
	struct work_struct service_task;
	struct work_struct rst_service_task;
	struct work_struct mbx_service_task;

	bool cur_promisc;
	int num_alloc_vfs;	/* Actual number of VFs allocated */

	struct hclge_tqp *htqp;
	struct hclge_vport *vport;

	struct dentry *hclge_dbgfs;

	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;

#define HCLGE_FLAG_MAIN			BIT(0)
#define HCLGE_FLAG_DCB_CAPABLE		BIT(1)
#define HCLGE_FLAG_DCB_ENABLE		BIT(2)
#define HCLGE_FLAG_MQPRIO_ENABLE	BIT(3)
	u32 flag;

	u32 pkt_buf_size;	/* Total pf buf size for tx/rx */
	u32 mps;		/* Max packet size */

	enum hclge_mta_dmac_sel_type mta_mac_sel_type;
	bool enable_mta;	/* Multicast filter enable */

	struct hclge_vlan_type_cfg vlan_type_cfg;

	/* Per-VLAN-id bitmap of which vports have that vlan configured */
	unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
};

/* VPort
 * level vlan tag configuration for TX direction */
struct hclge_tx_vtag_cfg {
	bool accept_tag1;	/* Whether accept tag1 packet from host */
	bool accept_untag1;	/* Whether accept untag1 packet from host */
	bool accept_tag2;
	bool accept_untag2;
	bool insert_tag1_en;	/* Whether insert inner vlan tag */
	bool insert_tag2_en;	/* Whether insert outer vlan tag */
	u16 default_tag1;	/* The default inner vlan tag to insert */
	u16 default_tag2;	/* The default outer vlan tag to insert */
};

/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
	bool strip_tag1_en;	/* Whether strip inner vlan tag */
	bool strip_tag2_en;	/* Whether strip outer vlan tag */
	bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
	bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
};

/* Per-protocol RSS tuple-field enables (one flag byte per flow type) */
struct hclge_rss_tuple_cfg {
	u8 ipv4_tcp_en;
	u8 ipv4_udp_en;
	u8 ipv4_sctp_en;
	u8 ipv4_fragment_en;
	u8 ipv6_tcp_en;
	u8 ipv6_udp_en;
	u8 ipv6_sctp_en;
	u8 ipv6_fragment_en;
};

/* One virtual port (PF itself or a VF) and its per-port configuration */
struct hclge_vport {
	u16 alloc_tqps;	/* Allocated Tx/Rx queues */

	u8  rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
	/* User configured lookup table entries */
	u8  rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
	int rss_algo;		/* User configured hash algorithm */
	/* User configured rss tuple sets */
	struct hclge_rss_tuple_cfg rss_tuple_sets;

	u16 alloc_rss_size;

	u16 qs_offset;		/* first queue set id of this vport */
	u16 bw_limit;		/* VSI BW Limit (0 = disabled) */
	u8  dwrr;

	struct hclge_tx_vtag_cfg  txvlan_cfg;
	struct hclge_rx_vtag_cfg  rxvlan_cfg;

	int vport_id;
	struct hclge_dev *back;	/* Back reference to associated dev */
	struct hnae3_handle nic;
	struct hnae3_handle roce;

	bool accept_mta_mc;	/* whether to accept mta filter multicast */
	/* Shadow copy of the hardware multicast table for this vport */
	unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
};

/* Initialize a promiscuous-mode parameter block for the given vport */
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id);

/* Add/remove unicast and multicast MAC addresses for one vport.
 * All return 0 on success or a negative errno.
 */
int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);
int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);

/* Enable/disable the multicast (MTA) filter for one function */
int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
			      u8 func_id,
			      bool enable);
/* Sync 'count' MTA entries starting at 'idx' from the 'status' bitmap;
 * writes hardware only when update_filter is true.
 */
int hclge_update_mta_status_common(struct hclge_vport *vport,
				   unsigned long *status,
				   u16 idx,
				   u16 count,
				   bool update_filter);

/* Resolve the hclge_vport that owns an hnae3 handle */
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
/* Bind (en=true) or unbind a ring chain to/from an interrupt vector */
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain);

/* Map a generic hnae3 queue back to its global TQP index */
static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
	struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
/* Add (is_kill=false) or remove (is_kill=true) a VLAN filter entry */
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);

int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);

/* Mailbox and reset entry points used by the service tasks */
void hclge_mbx_handler(struct hclge_dev *hdev);
void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
#endif