/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.

#ifndef __HCLGE_MAIN_H
#define __HCLGE_MAIN_H
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/kfifo.h>

#include "hclge_cmd.h"
#include "hnae3.h"

#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"

#define HCLGE_MAX_PF_NUM 8

#define HCLGE_VF_VPORT_START_NUM 1

#define HCLGE_RD_FIRST_STATS_NUM 2
#define HCLGE_RD_OTHER_STATS_NUM 4

#define HCLGE_INVALID_VPORT 0xffff

#define HCLGE_PF_CFG_BLOCK_SIZE 32
#define HCLGE_PF_CFG_DESC_NUM \
        (HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)

#define HCLGE_VECTOR_REG_BASE 0x20000
#define HCLGE_VECTOR_EXT_REG_BASE 0x30000
#define HCLGE_MISC_VECTOR_REG_BASE 0x20400

#define HCLGE_VECTOR_REG_OFFSET 0x4
#define HCLGE_VECTOR_REG_OFFSET_H 0x1000
#define HCLGE_VECTOR_VF_OFFSET 0x100000

#define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000
#define HCLGE_CMDQ_TX_ADDR_H_REG 0x27004
#define HCLGE_CMDQ_TX_DEPTH_REG 0x27008
#define HCLGE_CMDQ_TX_TAIL_REG 0x27010
#define HCLGE_CMDQ_TX_HEAD_REG 0x27014
#define HCLGE_CMDQ_RX_ADDR_L_REG 0x27018
#define HCLGE_CMDQ_RX_ADDR_H_REG 0x2701C
#define HCLGE_CMDQ_RX_DEPTH_REG 0x27020
#define HCLGE_CMDQ_RX_TAIL_REG 0x27024
#define HCLGE_CMDQ_RX_HEAD_REG 0x27028
#define HCLGE_CMDQ_INTR_STS_REG 0x27104
#define HCLGE_CMDQ_INTR_EN_REG 0x27108
#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C

/* bar registers for common func */
#define HCLGE_VECTOR0_OTER_EN_REG 0x20600
#define HCLGE_GRO_EN_REG 0x28000

/* bar registers for rcb */
#define HCLGE_RING_RX_ADDR_L_REG 0x80000
#define HCLGE_RING_RX_ADDR_H_REG 0x80004
#define HCLGE_RING_RX_BD_NUM_REG 0x80008
#define HCLGE_RING_RX_BD_LENGTH_REG 0x8000C
#define HCLGE_RING_RX_MERGE_EN_REG 0x80014
#define HCLGE_RING_RX_TAIL_REG 0x80018
#define HCLGE_RING_RX_HEAD_REG 0x8001C
#define HCLGE_RING_RX_FBD_NUM_REG 0x80020
#define HCLGE_RING_RX_OFFSET_REG 0x80024
#define HCLGE_RING_RX_FBD_OFFSET_REG 0x80028
#define HCLGE_RING_RX_STASH_REG 0x80030
#define HCLGE_RING_RX_BD_ERR_REG 0x80034
#define HCLGE_RING_TX_ADDR_L_REG 0x80040
#define HCLGE_RING_TX_ADDR_H_REG 0x80044
#define HCLGE_RING_TX_BD_NUM_REG 0x80048
#define HCLGE_RING_TX_PRIORITY_REG 0x8004C
#define HCLGE_RING_TX_TC_REG 0x80050
#define HCLGE_RING_TX_MERGE_EN_REG 0x80054
#define HCLGE_RING_TX_TAIL_REG 0x80058
#define HCLGE_RING_TX_HEAD_REG 0x8005C
#define HCLGE_RING_TX_FBD_NUM_REG 0x80060
#define HCLGE_RING_TX_OFFSET_REG 0x80064
#define HCLGE_RING_TX_EBD_NUM_REG 0x80068
#define HCLGE_RING_TX_EBD_OFFSET_REG 0x80070
#define HCLGE_RING_TX_BD_ERR_REG 0x80074
#define HCLGE_RING_EN_REG 0x80090

/* bar registers for tqp interrupt */
#define HCLGE_TQP_INTR_CTRL_REG 0x20000
#define HCLGE_TQP_INTR_GL0_REG 0x20100
#define HCLGE_TQP_INTR_GL1_REG 0x20200
#define HCLGE_TQP_INTR_GL2_REG 0x20300
#define HCLGE_TQP_INTR_RL_REG 0x20900

#define HCLGE_RSS_IND_TBL_SIZE 512
#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE 40
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)

#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
#define HCLGE_D_PORT_BIT BIT(0)
#define HCLGE_S_PORT_BIT BIT(1)
#define HCLGE_D_IP_BIT BIT(2)
#define HCLGE_S_IP_BIT BIT(3)
#define HCLGE_V_TAG_BIT BIT(4)
#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \
        (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)

#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
#define HCLGE_RSS_TC_SIZE_2 4
#define HCLGE_RSS_TC_SIZE_3 8
#define HCLGE_RSS_TC_SIZE_4 16
#define HCLGE_RSS_TC_SIZE_5 32
#define HCLGE_RSS_TC_SIZE_6 64
#define HCLGE_RSS_TC_SIZE_7 128

#define HCLGE_UMV_TBL_SIZE 3072
#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
        (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
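
/* Illustrative arithmetic (descriptive note): the 3072-entry unicast
 * MAC/VLAN table split evenly across up to 8 PFs gives each PF
 * 3072 / 8 = 384 private entries by default; firmware may still report a
 * different size (see wanted_umv_size/max_umv_size in struct hclge_dev
 * below).
 */
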
#define HCLGE_TQP_RESET_TRY_TIMES 200

#define HCLGE_PHY_PAGE_MDIX 0
#define HCLGE_PHY_PAGE_COPPER 0

/* Page Selection Reg. */
#define HCLGE_PHY_PAGE_REG 22

/* Copper Specific Control Register */
#define HCLGE_PHY_CSC_REG 16

/* Copper Specific Status Register */
#define HCLGE_PHY_CSS_REG 17

#define HCLGE_PHY_MDIX_CTRL_S 5
#define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5)

#define HCLGE_PHY_MDIX_STATUS_B 6
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11

#define HCLGE_GET_DFX_REG_TYPE_CNT 4

/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64

#define HCLGE_MAX_QSET_NUM 1024

enum HLCGE_PORT_TYPE {
        HOST_PORT,
        NETWORK_PORT
};

#define PF_VPORT_ID 0

#define HCLGE_PF_ID_S 0
#define HCLGE_PF_ID_M GENMASK(2, 0)
#define HCLGE_VF_ID_S 3
#define HCLGE_VF_ID_M GENMASK(10, 3)
#define HCLGE_PORT_TYPE_B 11
#define HCLGE_NETWORK_PORT_ID_S 0
#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)

/* Reset related Registers */
#define HCLGE_PF_OTHER_INT_REG 0x20600
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_MISC_VECTOR_INT_STS 0x20800
#define HCLGE_GLOBAL_RESET_REG 0x20A00
#define HCLGE_GLOBAL_RESET_BIT 0
#define HCLGE_CORE_RESET_BIT 1
#define HCLGE_IMP_RESET_BIT 2
#define HCLGE_RESET_INT_M GENMASK(7, 5)
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0

/* Vector0 register bits define */
#define HCLGE_VECTOR0_GLOBALRESET_INT_B 5
#define HCLGE_VECTOR0_CORERESET_INT_B 6
#define HCLGE_VECTOR0_IMPRESET_INT_B 7

/* Vector0 interrupt CMDQ event source register (RW) */
#define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100
/* CMDQ register bits for RX event (= MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1

#define HCLGE_VECTOR0_IMP_RESET_INT_B 1
#define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U
#define HCLGE_VECTOR0_IMP_RD_POISON_B 5U

#define HCLGE_MAC_DEFAULT_FRAME \
        (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME 64
#define HCLGE_MAC_MAX_FRAME 9728
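
/* Illustrative arithmetic: with the standard kernel values ETH_HLEN = 14,
 * ETH_FCS_LEN = 4, VLAN_HLEN = 4 and ETH_DATA_LEN = 1500, the default
 * frame size above works out to 14 + 4 + 2 * 4 + 1500 = 1526 bytes, i.e.
 * a standard-MTU frame with room for double VLAN tagging and the FCS,
 * sitting between the 64-byte minimum and the 9728-byte jumbo maximum.
 */
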
#define HCLGE_SUPPORT_1G_BIT BIT(0)
#define HCLGE_SUPPORT_10G_BIT BIT(1)
#define HCLGE_SUPPORT_25G_BIT BIT(2)
#define HCLGE_SUPPORT_50G_BIT BIT(3)
#define HCLGE_SUPPORT_100G_BIT BIT(4)
/* to be compatible with existing boards */
#define HCLGE_SUPPORT_40G_BIT BIT(5)
#define HCLGE_SUPPORT_100M_BIT BIT(6)
#define HCLGE_SUPPORT_10M_BIT BIT(7)
#define HCLGE_SUPPORT_200G_BIT BIT(8)
#define HCLGE_SUPPORT_GE \
        (HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
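
/* Illustrative example: a port whose current media supports 1G/10G/25G
 * would report a speed_ability of HCLGE_SUPPORT_1G_BIT |
 * HCLGE_SUPPORT_10G_BIT | HCLGE_SUPPORT_25G_BIT (0x7), while
 * HCLGE_SUPPORT_GE groups the 10M/100M/1G bits (0xc1) so GE-class links
 * can be tested with a single mask.
 */
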
enum HCLGE_DEV_STATE {
        HCLGE_STATE_REINITING,
        HCLGE_STATE_DOWN,
        HCLGE_STATE_DISABLED,
        HCLGE_STATE_REMOVING,
        HCLGE_STATE_NIC_REGISTERED,
        HCLGE_STATE_ROCE_REGISTERED,
        HCLGE_STATE_SERVICE_INITED,
        HCLGE_STATE_RST_SERVICE_SCHED,
        HCLGE_STATE_RST_HANDLING,
        HCLGE_STATE_MBX_SERVICE_SCHED,
        HCLGE_STATE_MBX_HANDLING,
        HCLGE_STATE_STATISTICS_UPDATING,
        HCLGE_STATE_CMD_DISABLE,
        HCLGE_STATE_LINK_UPDATING,
        HCLGE_STATE_PROMISC_CHANGED,
        HCLGE_STATE_RST_FAIL,
        HCLGE_STATE_FD_TBL_CHANGED,
        HCLGE_STATE_FD_CLEAR_ALL,
        HCLGE_STATE_FD_USER_DEF_CHANGED,
        HCLGE_STATE_MAX
};

enum hclge_evt_cause {
        HCLGE_VECTOR0_EVENT_RST,
        HCLGE_VECTOR0_EVENT_MBX,
        HCLGE_VECTOR0_EVENT_ERR,
        HCLGE_VECTOR0_EVENT_OTHER,
};

enum HCLGE_MAC_SPEED {
        HCLGE_MAC_SPEED_UNKNOWN = 0,    /* unknown */
        HCLGE_MAC_SPEED_10M = 10,       /* 10 Mbps */
        HCLGE_MAC_SPEED_100M = 100,     /* 100 Mbps */
        HCLGE_MAC_SPEED_1G = 1000,      /* 1000 Mbps = 1 Gbps */
        HCLGE_MAC_SPEED_10G = 10000,    /* 10000 Mbps = 10 Gbps */
        HCLGE_MAC_SPEED_25G = 25000,    /* 25000 Mbps = 25 Gbps */
        HCLGE_MAC_SPEED_40G = 40000,    /* 40000 Mbps = 40 Gbps */
        HCLGE_MAC_SPEED_50G = 50000,    /* 50000 Mbps = 50 Gbps */
        HCLGE_MAC_SPEED_100G = 100000,  /* 100000 Mbps = 100 Gbps */
        HCLGE_MAC_SPEED_200G = 200000   /* 200000 Mbps = 200 Gbps */
};

enum HCLGE_MAC_DUPLEX {
        HCLGE_MAC_HALF,
        HCLGE_MAC_FULL
};

#define QUERY_SFP_SPEED 0
#define QUERY_ACTIVE_SPEED 1

struct hclge_mac {
        u8 mac_id;
        u8 phy_addr;
        u8 flag;
        u8 media_type;  /* port media type, e.g. fibre/copper/backplane */
        u8 mac_addr[ETH_ALEN];
        u8 autoneg;
        u8 duplex;
        u8 support_autoneg;
        u8 speed_type;  /* 0: sfp speed, 1: active speed */
        u32 speed;
        u32 max_speed;
        u32 speed_ability; /* speed ability supported by current media */
        u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
        u32 fec_mode; /* active fec mode */
        u32 user_fec_mode;
        u32 fec_ability;
        int link;       /* store the link status of mac & phy (if phy exists) */
        struct phy_device *phydev;
        struct mii_bus *mdio_bus;
        phy_interface_t phy_if;
        __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
        __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
};

struct hclge_hw {
        void __iomem *io_base;
        void __iomem *mem_base;
        struct hclge_mac mac;
        int num_vec;
        struct hclge_cmq cmq;
};

/* TQP stats */
struct hlcge_tqp_stats {
        /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
        u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
        /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
        u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};

struct hclge_tqp {
        /* copy of the device pointer from pci_dev,
         * used when performing DMA mapping
         */
        struct device *dev;
        struct hnae3_queue q;
        struct hlcge_tqp_stats tqp_stats;
        u16 index;      /* Global index in a NIC controller */

        bool alloced;
};
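
/* Descriptive note: the embedded struct hnae3_queue q is what the common
 * hnae3 layer sees; hclge_get_queue_id() below recovers the owning
 * hclge_tqp from such a queue pointer with container_of().
 */
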
enum hclge_fc_mode {
        HCLGE_FC_NONE,
        HCLGE_FC_RX_PAUSE,
        HCLGE_FC_TX_PAUSE,
        HCLGE_FC_FULL,
        HCLGE_FC_PFC,
        HCLGE_FC_DEFAULT
};

enum hclge_link_fail_code {
        HCLGE_LF_NORMAL,
        HCLGE_LF_REF_CLOCK_LOST,
        HCLGE_LF_XSFP_TX_DISABLE,
        HCLGE_LF_XSFP_ABSENT,
};

#define HCLGE_LINK_STATUS_DOWN 0
#define HCLGE_LINK_STATUS_UP 1

#define HCLGE_PG_NUM 4
#define HCLGE_SCH_MODE_SP 0
#define HCLGE_SCH_MODE_DWRR 1
struct hclge_pg_info {
        u8 pg_id;
        u8 pg_sch_mode; /* 0: sp; 1: dwrr */
        u8 tc_bit_map;
        u32 bw_limit;
        u8 tc_dwrr[HNAE3_MAX_TC];
};

struct hclge_tc_info {
        u8 tc_id;
        u8 tc_sch_mode; /* 0: sp; 1: dwrr */
        u8 pgid;
        u32 bw_limit;
};

struct hclge_cfg {
        u8 tc_num;
        u16 tqp_desc_num;
        u16 rx_buf_len;
        u16 vf_rss_size_max;
        u16 pf_rss_size_max;
        u8 phy_addr;
        u8 media_type;
        u8 mac_addr[ETH_ALEN];
        u8 default_speed;
        u32 numa_node_map;
        u16 speed_ability;
        u16 umv_space;
};

struct hclge_tm_info {
        u8 num_tc;
        u8 num_pg;      /* It must be 1 if vNET-Base scheduling */
        u8 pg_dwrr[HCLGE_PG_NUM];
        u8 prio_tc[HNAE3_MAX_USER_PRIO];
        struct hclge_pg_info pg_info[HCLGE_PG_NUM];
        struct hclge_tc_info tc_info[HNAE3_MAX_TC];
        enum hclge_fc_mode fc_mode;
        u8 hw_pfc_map; /* Allow for packet drop or not on this TC */
        u8 pfc_en;      /* PFC enabled or not for user priority */
};

struct hclge_comm_stats_str {
        char desc[ETH_GSTRING_LEN];
        unsigned long offset;
};

/* mac stats, opcode id: 0x0032 */
struct hclge_mac_stats {
        u64 mac_tx_mac_pause_num;
        u64 mac_rx_mac_pause_num;
        u64 mac_tx_pfc_pri0_pkt_num;
        u64 mac_tx_pfc_pri1_pkt_num;
        u64 mac_tx_pfc_pri2_pkt_num;
        u64 mac_tx_pfc_pri3_pkt_num;
        u64 mac_tx_pfc_pri4_pkt_num;
        u64 mac_tx_pfc_pri5_pkt_num;
        u64 mac_tx_pfc_pri6_pkt_num;
        u64 mac_tx_pfc_pri7_pkt_num;
        u64 mac_rx_pfc_pri0_pkt_num;
        u64 mac_rx_pfc_pri1_pkt_num;
        u64 mac_rx_pfc_pri2_pkt_num;
        u64 mac_rx_pfc_pri3_pkt_num;
        u64 mac_rx_pfc_pri4_pkt_num;
        u64 mac_rx_pfc_pri5_pkt_num;
        u64 mac_rx_pfc_pri6_pkt_num;
        u64 mac_rx_pfc_pri7_pkt_num;
        u64 mac_tx_total_pkt_num;
        u64 mac_tx_total_oct_num;
        u64 mac_tx_good_pkt_num;
        u64 mac_tx_bad_pkt_num;
        u64 mac_tx_good_oct_num;
        u64 mac_tx_bad_oct_num;
        u64 mac_tx_uni_pkt_num;
        u64 mac_tx_multi_pkt_num;
        u64 mac_tx_broad_pkt_num;
        u64 mac_tx_undersize_pkt_num;
        u64 mac_tx_oversize_pkt_num;
        u64 mac_tx_64_oct_pkt_num;
        u64 mac_tx_65_127_oct_pkt_num;
        u64 mac_tx_128_255_oct_pkt_num;
        u64 mac_tx_256_511_oct_pkt_num;
        u64 mac_tx_512_1023_oct_pkt_num;
        u64 mac_tx_1024_1518_oct_pkt_num;
        u64 mac_tx_1519_2047_oct_pkt_num;
        u64 mac_tx_2048_4095_oct_pkt_num;
        u64 mac_tx_4096_8191_oct_pkt_num;
        u64 rsv0;
        u64 mac_tx_8192_9216_oct_pkt_num;
        u64 mac_tx_9217_12287_oct_pkt_num;
        u64 mac_tx_12288_16383_oct_pkt_num;
        u64 mac_tx_1519_max_good_oct_pkt_num;
        u64 mac_tx_1519_max_bad_oct_pkt_num;

        u64 mac_rx_total_pkt_num;
        u64 mac_rx_total_oct_num;
        u64 mac_rx_good_pkt_num;
        u64 mac_rx_bad_pkt_num;
        u64 mac_rx_good_oct_num;
        u64 mac_rx_bad_oct_num;
        u64 mac_rx_uni_pkt_num;
        u64 mac_rx_multi_pkt_num;
        u64 mac_rx_broad_pkt_num;
        u64 mac_rx_undersize_pkt_num;
        u64 mac_rx_oversize_pkt_num;
        u64 mac_rx_64_oct_pkt_num;
        u64 mac_rx_65_127_oct_pkt_num;
        u64 mac_rx_128_255_oct_pkt_num;
        u64 mac_rx_256_511_oct_pkt_num;
        u64 mac_rx_512_1023_oct_pkt_num;
        u64 mac_rx_1024_1518_oct_pkt_num;
        u64 mac_rx_1519_2047_oct_pkt_num;
        u64 mac_rx_2048_4095_oct_pkt_num;
        u64 mac_rx_4096_8191_oct_pkt_num;
        u64 rsv1;
        u64 mac_rx_8192_9216_oct_pkt_num;
        u64 mac_rx_9217_12287_oct_pkt_num;
        u64 mac_rx_12288_16383_oct_pkt_num;
        u64 mac_rx_1519_max_good_oct_pkt_num;
        u64 mac_rx_1519_max_bad_oct_pkt_num;

        u64 mac_tx_fragment_pkt_num;
        u64 mac_tx_undermin_pkt_num;
        u64 mac_tx_jabber_pkt_num;
        u64 mac_tx_err_all_pkt_num;
        u64 mac_tx_from_app_good_pkt_num;
        u64 mac_tx_from_app_bad_pkt_num;
        u64 mac_rx_fragment_pkt_num;
        u64 mac_rx_undermin_pkt_num;
        u64 mac_rx_jabber_pkt_num;
        u64 mac_rx_fcs_err_pkt_num;
        u64 mac_rx_send_app_good_pkt_num;
        u64 mac_rx_send_app_bad_pkt_num;
        u64 mac_tx_pfc_pause_pkt_num;
        u64 mac_rx_pfc_pause_pkt_num;
        u64 mac_tx_ctrl_pkt_num;
        u64 mac_rx_ctrl_pkt_num;
};

#define HCLGE_STATS_TIMER_INTERVAL 300UL

struct hclge_vlan_type_cfg {
        u16 rx_ot_fst_vlan_type;
        u16 rx_ot_sec_vlan_type;
        u16 rx_in_fst_vlan_type;
        u16 rx_in_sec_vlan_type;
        u16 tx_ot_vlan_type;
        u16 tx_in_vlan_type;
};

enum HCLGE_FD_MODE {
        HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
        HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
        HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
        HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
};

enum HCLGE_FD_KEY_TYPE {
        HCLGE_FD_KEY_BASE_ON_PTYPE,
        HCLGE_FD_KEY_BASE_ON_TUPLE,
};

enum HCLGE_FD_STAGE {
        HCLGE_FD_STAGE_1,
        HCLGE_FD_STAGE_2,
        MAX_STAGE_NUM,
};

/* OUTER_XXX indicates tuples in the tunnel header of a tunnel packet.
 * INNER_XXX indicates tuples in the tunneled header of a tunnel packet,
 * or the tuples of a non-tunnel packet.
 */
enum HCLGE_FD_TUPLE {
        OUTER_DST_MAC,
        OUTER_SRC_MAC,
        OUTER_VLAN_TAG_FST,
        OUTER_VLAN_TAG_SEC,
        OUTER_ETH_TYPE,
        OUTER_L2_RSV,
        OUTER_IP_TOS,
        OUTER_IP_PROTO,
        OUTER_SRC_IP,
        OUTER_DST_IP,
        OUTER_L3_RSV,
        OUTER_SRC_PORT,
        OUTER_DST_PORT,
        OUTER_L4_RSV,
        OUTER_TUN_VNI,
        OUTER_TUN_FLOW_ID,
        INNER_DST_MAC,
        INNER_SRC_MAC,
        INNER_VLAN_TAG_FST,
        INNER_VLAN_TAG_SEC,
        INNER_ETH_TYPE,
        INNER_L2_RSV,
        INNER_IP_TOS,
        INNER_IP_PROTO,
        INNER_SRC_IP,
        INNER_DST_IP,
        INNER_L3_RSV,
        INNER_SRC_PORT,
        INNER_DST_PORT,
        INNER_L4_RSV,
        MAX_TUPLE,
};

#define HCLGE_FD_TUPLE_USER_DEF_TUPLES \
        (BIT(INNER_L2_RSV) | BIT(INNER_L3_RSV) | BIT(INNER_L4_RSV))

enum HCLGE_FD_META_DATA {
        PACKET_TYPE_ID,
        IP_FRAGEMENT,
        ROCE_TYPE,
        NEXT_KEY,
        VLAN_NUMBER,
        SRC_VPORT,
        DST_VPORT,
        TUNNEL_PACKET,
        MAX_META_DATA,
};

enum HCLGE_FD_KEY_OPT {
        KEY_OPT_U8,
        KEY_OPT_LE16,
        KEY_OPT_LE32,
        KEY_OPT_MAC,
        KEY_OPT_IP,
        KEY_OPT_VNI,
};

struct key_info {
        u8 key_type;
        u8 key_length; /* use bit as unit */
        enum HCLGE_FD_KEY_OPT key_opt;
        int offset;
        int moffset;
};

#define MAX_KEY_LENGTH 400
#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH 32
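
/* Illustrative arithmetic: a 400-bit key is 400 / 8 = 50 bytes, which
 * DIV_ROUND_UP() rounds to 13 double words, so MAX_KEY_BYTES is
 * 13 * 4 = 52; the final double word is therefore only partly used.
 */
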
#define HCLGE_FD_MAX_USER_DEF_OFFSET 9000
#define HCLGE_FD_USER_DEF_DATA GENMASK(15, 0)
#define HCLGE_FD_USER_DEF_OFFSET GENMASK(15, 0)
#define HCLGE_FD_USER_DEF_OFFSET_UNMASK GENMASK(15, 0)

/* assigned by firmware; the real filter number for each PF may be smaller */
#define MAX_FD_FILTER_NUM 4096
#define HCLGE_ARFS_EXPIRE_INTERVAL 5UL

enum HCLGE_FD_ACTIVE_RULE_TYPE {
        HCLGE_FD_RULE_NONE,
        HCLGE_FD_ARFS_ACTIVE,
        HCLGE_FD_EP_ACTIVE,
        HCLGE_FD_TC_FLOWER_ACTIVE,
};

enum HCLGE_FD_PACKET_TYPE {
        NIC_PACKET,
        ROCE_PACKET,
};

enum HCLGE_FD_ACTION {
        HCLGE_FD_ACTION_SELECT_QUEUE,
        HCLGE_FD_ACTION_DROP_PACKET,
        HCLGE_FD_ACTION_SELECT_TC,
};

enum HCLGE_FD_NODE_STATE {
        HCLGE_FD_TO_ADD,
        HCLGE_FD_TO_DEL,
        HCLGE_FD_ACTIVE,
        HCLGE_FD_DELETED,
};

enum HCLGE_FD_USER_DEF_LAYER {
        HCLGE_FD_USER_DEF_NONE,
        HCLGE_FD_USER_DEF_L2,
        HCLGE_FD_USER_DEF_L3,
        HCLGE_FD_USER_DEF_L4,
};

#define HCLGE_FD_USER_DEF_LAYER_NUM 3
struct hclge_fd_user_def_cfg {
        u16 ref_cnt;
        u16 offset;
};

struct hclge_fd_user_def_info {
        enum HCLGE_FD_USER_DEF_LAYER layer;
        u16 data;
        u16 data_mask;
        u16 offset;
};

struct hclge_fd_key_cfg {
        u8 key_sel;
        u8 inner_sipv6_word_en;
        u8 inner_dipv6_word_en;
        u8 outer_sipv6_word_en;
        u8 outer_dipv6_word_en;
        u32 tuple_active;
        u32 meta_data_active;
};

struct hclge_fd_cfg {
        u8 fd_mode;
        u16 max_key_length; /* use bit as unit */
        u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
        u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
        struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
        struct hclge_fd_user_def_cfg user_def_cfg[HCLGE_FD_USER_DEF_LAYER_NUM];
};

#define IPV4_INDEX 3
#define IPV6_SIZE 4
struct hclge_fd_rule_tuples {
        u8 src_mac[ETH_ALEN];
        u8 dst_mac[ETH_ALEN];
        /* Holds both IPv4 and IPv6 addresses.
         * An IPv4 address is stored in src/dst_ip[3].
         */
        u32 src_ip[IPV6_SIZE];
        u32 dst_ip[IPV6_SIZE];
        u16 src_port;
        u16 dst_port;
        u16 vlan_tag1;
        u16 ether_proto;
        u16 l2_user_def;
        u16 l3_user_def;
        u32 l4_user_def;
        u8 ip_tos;
        u8 ip_proto;
};
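
/* Descriptive example: for an IPv4 rule only src_ip[IPV4_INDEX] /
 * dst_ip[IPV4_INDEX] (i.e. index 3) carries the address and the other
 * words are unused, while an IPv6 address occupies all IPV6_SIZE u32
 * words of the array.
 */
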
struct hclge_fd_rule {
        struct hlist_node rule_node;
        struct hclge_fd_rule_tuples tuples;
        struct hclge_fd_rule_tuples tuples_mask;
        u32 unused_tuple;
        u32 flow_type;
        union {
                struct {
                        unsigned long cookie;
                        u8 tc;
                } cls_flower;
                struct {
                        u16 flow_id; /* only used for arfs */
                } arfs;
                struct {
                        struct hclge_fd_user_def_info user_def;
                } ep;
        };
        u16 queue_id;
        u16 vf_id;
        u16 location;
        enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type;
        enum HCLGE_FD_NODE_STATE state;
        u8 action;
};

struct hclge_fd_ad_data {
        u16 ad_id;
        u8 drop_packet;
        u8 forward_to_direct_queue;
        u16 queue_id;
        u8 use_counter;
        u8 counter_id;
        u8 use_next_stage;
        u8 write_rule_id_to_bd;
        u8 next_input_key;
        u16 rule_id;
        u16 tc_size;
        u8 override_tc;
};

enum HCLGE_MAC_NODE_STATE {
        HCLGE_MAC_TO_ADD,
        HCLGE_MAC_TO_DEL,
        HCLGE_MAC_ACTIVE
};

struct hclge_mac_node {
        struct list_head node;
        enum HCLGE_MAC_NODE_STATE state;
        u8 mac_addr[ETH_ALEN];
};

enum HCLGE_MAC_ADDR_TYPE {
        HCLGE_MAC_ADDR_UC,
        HCLGE_MAC_ADDR_MC
};

struct hclge_vport_vlan_cfg {
        struct list_head node;
        int hd_tbl_status;
        u16 vlan_id;
};

struct hclge_rst_stats {
        u32 reset_done_cnt;     /* the number of resets completed */
        u32 hw_reset_done_cnt;  /* the number of HW resets completed */
        u32 pf_rst_cnt;         /* the number of PF resets */
        u32 flr_rst_cnt;        /* the number of FLR resets */
        u32 global_rst_cnt;     /* the number of GLOBAL resets */
        u32 imp_rst_cnt;        /* the number of IMP resets */
        u32 reset_cnt;          /* the number of resets */
        u32 reset_fail_cnt;     /* the number of failed resets */
};

/* time and register status when a mac tunnel interruption occurs */
struct hclge_mac_tnl_stats {
        u64 time;
        u32 status;
};

#define HCLGE_RESET_INTERVAL (10 * HZ)
#define HCLGE_WAIT_RESET_DONE 100

#pragma pack(1)
struct hclge_vf_vlan_cfg {
        u8 mbx_cmd;
        u8 subcode;
        u8 is_kill;
        u16 vlan;
        u16 proto;
};

#pragma pack()
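
/* Descriptive note: with #pragma pack(1) the structure above is exactly
 * 1 + 1 + 1 + 2 + 2 = 7 bytes, so its fields can map byte-for-byte onto
 * the VF VLAN mailbox message payload (as suggested by the mbx_cmd and
 * subcode fields); without packing, the u16 members would be aligned to
 * even offsets and padding would be inserted.
 */
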
/* For each bit of TCAM entry, it uses a pair of 'x' and
 * 'y' to indicate which value to match, like below:
 * ----------------------------------
 * | bit x | bit y |  search value  |
 * ----------------------------------
 * |   0   |   0   |   always hit   |
 * ----------------------------------
 * |   1   |   0   |   match '0'    |
 * ----------------------------------
 * |   0   |   1   |   match '1'    |
 * ----------------------------------
 * |   1   |   1   |    invalid     |
 * ----------------------------------
 * Then for input key(k) and mask(v), we can calculate the value by
 * the formulae:
 *      x = (~k) & v
 *      y = (k ^ ~v) & k
 */
#define calc_x(x, k, v) (x = ~(k) & (v))
#define calc_y(y, k, v) \
        do { \
                const typeof(k) _k_ = (k); \
                const typeof(v) _v_ = (v); \
                (y) = (_k_ ^ ~_v_) & (_k_); \
        } while (0)
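
/* Worked per-bit example of the encoding above (illustrative):
 *      masked-out bit (v = 0):   x = ~k & 0 = 0, y = (k ^ 1) & k = 0 -> always hit
 *      match '0' (k = 0, v = 1): x = ~0 & 1 = 1, y = (0 ^ 0) & 0 = 0 -> match '0'
 *      match '1' (k = 1, v = 1): x = ~1 & 1 = 0, y = (1 ^ 0) & 1 = 1 -> match '1'
 * Since the operations are bitwise, calc_x()/calc_y() convert a whole
 * byte or word of key/mask in a single call.
 */
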
#define HCLGE_MAC_TNL_LOG_SIZE 8
#define HCLGE_VPORT_NUM 256
struct hclge_dev {
        struct pci_dev *pdev;
        struct hnae3_ae_dev *ae_dev;
        struct hclge_hw hw;
        struct hclge_misc_vector misc_vector;
        struct hclge_mac_stats mac_stats;
        unsigned long state;
        unsigned long flr_state;
        unsigned long last_reset_time;

        enum hnae3_reset_type reset_type;
        enum hnae3_reset_type reset_level;
        unsigned long default_reset_request;
        unsigned long reset_request;    /* reset has been requested */
        unsigned long reset_pending;    /* client rst is pending to be served */
        struct hclge_rst_stats rst_stats;
        struct semaphore reset_sem;     /* protect reset process */
        u32 fw_version;
        u16 num_tqps;                   /* Num task queue pairs of this PF */
        u16 num_req_vfs;                /* Num VFs requested for this PF */

        u16 base_tqp_pid;       /* Base task tqp physical id of this PF */
        u16 alloc_rss_size;     /* Allocated RSS task queue */
        u16 vf_rss_size_max;    /* HW defined VF max RSS task queue */
        u16 pf_rss_size_max;    /* HW defined PF max RSS task queue */

        u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
        u16 num_alloc_vport;    /* Num vports this driver supports */
        u32 numa_node_mask;
        u16 rx_buf_len;
        u16 num_tx_desc;        /* desc num of each tx queue */
        u16 num_rx_desc;        /* desc num of each rx queue */
        u8 hw_tc_map;
        enum hclge_fc_mode fc_mode_last_time;
        u8 support_sfp_query;

#define HCLGE_FLAG_TC_BASE_SCH_MODE 1
#define HCLGE_FLAG_VNET_BASE_SCH_MODE 2
        u8 tx_sch_mode;
        u8 tc_max;
        u8 pfc_max;

        u8 default_up;
        u8 dcbx_cap;
        struct hclge_tm_info tm_info;

        u16 num_msi;
        u16 num_msi_left;
        u16 num_msi_used;
        u32 base_msi_vector;
        u16 *vector_status;
        int *vector_irq;
        u16 num_nic_msi;        /* Num of nic vectors for this PF */
        u16 num_roce_msi;       /* Num of roce vectors for this PF */
        int roce_base_vector;

        unsigned long service_timer_period;
        unsigned long service_timer_previous;
        struct timer_list reset_timer;
        struct delayed_work service_task;

        bool cur_promisc;
        int num_alloc_vfs;      /* Actual number of VFs allocated */

        struct hclge_tqp *htqp;
        struct hclge_vport *vport;

        struct dentry *hclge_dbgfs;

        struct hnae3_client *nic_client;
        struct hnae3_client *roce_client;

#define HCLGE_FLAG_MAIN BIT(0)
#define HCLGE_FLAG_DCB_CAPABLE BIT(1)
#define HCLGE_FLAG_DCB_ENABLE BIT(2)
#define HCLGE_FLAG_MQPRIO_ENABLE BIT(3)
        u32 flag;

        u32 pkt_buf_size;       /* Total pf buf size for tx/rx */
        u32 tx_buf_size;        /* Tx buffer size for each TC */
        u32 dv_buf_size;        /* Dv buffer size for each TC */

        u32 mps;                /* Max packet size */
        /* vport_lock protects resources shared by vports */
        struct mutex vport_lock;

        struct hclge_vlan_type_cfg vlan_type_cfg;

        unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
        unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)];

        unsigned long vport_config_block[BITS_TO_LONGS(HCLGE_VPORT_NUM)];

        struct hclge_fd_cfg fd_cfg;
        struct hlist_head fd_rule_list;
        spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
        u16 hclge_fd_rule_num;
        unsigned long serv_processed_cnt;
        unsigned long last_serv_processed;
        unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
        enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
        u8 fd_en;

        u16 wanted_umv_size;
        /* max available unicast mac vlan space */
        u16 max_umv_size;
        /* private unicast mac vlan space, it's the same for PF and its VFs */
        u16 priv_umv_size;
        /* unicast mac vlan space shared by PF and its VFs */
        u16 share_umv_size;

        DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
                      HCLGE_MAC_TNL_LOG_SIZE);

        /* affinity mask and notify for misc interrupt */
        cpumask_t affinity_mask;
        struct irq_affinity_notify affinity_notify;
};
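
/* Sizing note (illustrative): vlan_table keeps one vport bitmap per VLAN
 * ID, i.e. VLAN_N_VID (4096) rows of BITS_TO_LONGS(HCLGE_VPORT_NUM) =
 * BITS_TO_LONGS(256) longs each; on a 64-bit kernel that is
 * 4096 * 4 * 8 bytes = 128 KiB embedded in struct hclge_dev.
 */
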
/* VPort level vlan tag configuration for TX direction */
struct hclge_tx_vtag_cfg {
        bool accept_tag1;       /* Whether accept tag1 packet from host */
        bool accept_untag1;     /* Whether accept untag1 packet from host */
        bool accept_tag2;
        bool accept_untag2;
        bool insert_tag1_en;    /* Whether insert inner vlan tag */
        bool insert_tag2_en;    /* Whether insert outer vlan tag */
        u16 default_tag1;       /* The default inner vlan tag to insert */
        u16 default_tag2;       /* The default outer vlan tag to insert */
        bool tag_shift_mode_en;
};

/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
        bool rx_vlan_offload_en;        /* Whether enable rx vlan offload */
        bool strip_tag1_en;             /* Whether strip inner vlan tag */
        bool strip_tag2_en;             /* Whether strip outer vlan tag */
        bool vlan1_vlan_prionly;        /* Inner vlan tag up to descriptor enable */
        bool vlan2_vlan_prionly;        /* Outer vlan tag up to descriptor enable */
        bool strip_tag1_discard_en;     /* Inner vlan tag discard for BD enable */
        bool strip_tag2_discard_en;     /* Outer vlan tag discard for BD enable */
};

struct hclge_rss_tuple_cfg {
        u8 ipv4_tcp_en;
        u8 ipv4_udp_en;
        u8 ipv4_sctp_en;
        u8 ipv4_fragment_en;
        u8 ipv6_tcp_en;
        u8 ipv6_udp_en;
        u8 ipv6_sctp_en;
        u8 ipv6_fragment_en;
};

enum HCLGE_VPORT_STATE {
        HCLGE_VPORT_STATE_ALIVE,
        HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
        HCLGE_VPORT_STATE_MAX
};

struct hclge_vlan_info {
        u16 vlan_proto; /* so far supports 802.1Q only */
        u16 qos;
        u16 vlan_tag;
};

struct hclge_port_base_vlan_config {
        u16 state;
        struct hclge_vlan_info vlan_info;
};

struct hclge_vf_info {
        int link_state;
        u8 mac[ETH_ALEN];
        u32 spoofchk;
        u32 max_tx_rate;
        u32 trusted;
        u16 promisc_enable;
};

struct hclge_vport {
        u16 alloc_tqps; /* Allocated Tx/Rx queues */

        u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
        /* User configured lookup table entries */
        u16 *rss_indirection_tbl;
        int rss_algo;           /* User configured hash algorithm */
        /* User configured rss tuple sets */
        struct hclge_rss_tuple_cfg rss_tuple_sets;

        u16 alloc_rss_size;

        u16 qs_offset;
        u32 bw_limit;           /* VSI BW Limit (0 = disabled) */
        u8 dwrr;

        unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
        struct hclge_port_base_vlan_config port_base_vlan_cfg;
        struct hclge_tx_vtag_cfg txvlan_cfg;
        struct hclge_rx_vtag_cfg rxvlan_cfg;

        u16 used_umv_num;

        u16 vport_id;
        struct hclge_dev *back; /* Back reference to associated dev */
        struct hnae3_handle nic;
        struct hnae3_handle roce;

        unsigned long state;
        unsigned long last_active_jiffies;
        u32 mps; /* Max packet size */
        struct hclge_vf_info vf_info;

        u8 overflow_promisc_flags;
        u8 last_promisc_flags;

        spinlock_t mac_list_lock; /* protect the MAC address lists (add/delete) */
        struct list_head uc_mac_list;   /* Store VF unicast table */
        struct list_head mc_mac_list;   /* Store VF multicast table */
        struct list_head vlan_list;     /* Store VF vlan table */
};

int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
                                 bool en_mc_pmc, bool en_bc_pmc);
int hclge_add_uc_addr_common(struct hclge_vport *vport,
                             const unsigned char *addr);
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
                            const unsigned char *addr);
int hclge_add_mc_addr_common(struct hclge_vport *vport,
                             const unsigned char *addr);
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
                            const unsigned char *addr);

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
                                int vector_id, bool en,
                                struct hnae3_ring_chain_node *ring_chain);

static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
        struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);

        return tqp->index;
}

static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
{
        return !!hdev->reset_pending;
}
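
/* Usage sketch (illustrative, assuming the caller holds a hnae3_handle *h):
 *      struct hnae3_queue *q = h->kinfo.tqp[0];
 *      int qid = hclge_get_queue_id(q);  // global index of the first TQP
 */
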
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
                          u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);

int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);

void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
int hclge_dbg_read_cmd(struct hnae3_handle *handle, const char *cmd_buf,
                       char *buf, int len);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
                        enum hnae3_reset_notify_type type);
int hclge_update_mac_list(struct hclge_vport *vport,
                          enum HCLGE_MAC_NODE_STATE state,
                          enum HCLGE_MAC_ADDR_TYPE mac_type,
                          const unsigned char *addr);
int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
                                       const u8 *old_addr, const u8 *new_addr);
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
                                  enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
void hclge_restore_mac_table_common(struct hclge_vport *vport);
void hclge_restore_vport_vlan_table(struct hclge_vport *vport);
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
                                    struct hclge_vlan_info *vlan_info);
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
                                      u16 state, u16 vlan_tag, u16 qos,
                                      u16 vlan_proto);
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
                                struct hclge_desc *desc);
void hclge_report_hw_error(struct hclge_dev *hdev,
                           enum hnae3_hw_error_type type);
void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
void hclge_dbg_dump_rst_info(struct hclge_dev *hdev);
int hclge_push_vf_link_status(struct hclge_vport *vport);
#endif