// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#ifndef __HCLGE_MAIN_H
#define __HCLGE_MAIN_H
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>

#include "hclge_cmd.h"
#include "hnae3.h"

#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"

#define HCLGE_MAX_PF_NUM		8

#define HCLGE_INVALID_VPORT		0xffff

#define HCLGE_PF_CFG_BLOCK_SIZE		32
#define HCLGE_PF_CFG_DESC_NUM \
	(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)

#define HCLGE_VECTOR_REG_BASE		0x20000
#define HCLGE_MISC_VECTOR_REG_BASE	0x20400

#define HCLGE_VECTOR_REG_OFFSET		0x4
#define HCLGE_VECTOR_VF_OFFSET		0x100000

#define HCLGE_CMDQ_TX_ADDR_L_REG	0x27000
#define HCLGE_CMDQ_TX_ADDR_H_REG	0x27004
#define HCLGE_CMDQ_TX_DEPTH_REG		0x27008
#define HCLGE_CMDQ_TX_TAIL_REG		0x27010
#define HCLGE_CMDQ_TX_HEAD_REG		0x27014
#define HCLGE_CMDQ_RX_ADDR_L_REG	0x27018
#define HCLGE_CMDQ_RX_ADDR_H_REG	0x2701C
#define HCLGE_CMDQ_RX_DEPTH_REG		0x27020
#define HCLGE_CMDQ_RX_TAIL_REG		0x27024
#define HCLGE_CMDQ_RX_HEAD_REG		0x27028
#define HCLGE_CMDQ_INTR_SRC_REG		0x27100
#define HCLGE_CMDQ_INTR_STS_REG		0x27104
#define HCLGE_CMDQ_INTR_EN_REG		0x27108
#define HCLGE_CMDQ_INTR_GEN_REG		0x2710C

/* bar registers for common func */
#define HCLGE_VECTOR0_OTER_EN_REG	0x20600
#define HCLGE_RAS_OTHER_STS_REG		0x20B00
#define HCLGE_FUNC_RESET_STS_REG	0x20C00
#define HCLGE_GRO_EN_REG		0x28000

/* bar registers for rcb */
#define HCLGE_RING_RX_ADDR_L_REG	0x80000
#define HCLGE_RING_RX_ADDR_H_REG	0x80004
#define HCLGE_RING_RX_BD_NUM_REG	0x80008
#define HCLGE_RING_RX_BD_LENGTH_REG	0x8000C
#define HCLGE_RING_RX_MERGE_EN_REG	0x80014
#define HCLGE_RING_RX_TAIL_REG		0x80018
#define HCLGE_RING_RX_HEAD_REG		0x8001C
#define HCLGE_RING_RX_FBD_NUM_REG	0x80020
#define HCLGE_RING_RX_OFFSET_REG	0x80024
#define HCLGE_RING_RX_FBD_OFFSET_REG	0x80028
#define HCLGE_RING_RX_STASH_REG		0x80030
#define HCLGE_RING_RX_BD_ERR_REG	0x80034
#define HCLGE_RING_TX_ADDR_L_REG	0x80040
#define HCLGE_RING_TX_ADDR_H_REG	0x80044
#define HCLGE_RING_TX_BD_NUM_REG	0x80048
#define HCLGE_RING_TX_PRIORITY_REG	0x8004C
#define HCLGE_RING_TX_TC_REG		0x80050
#define HCLGE_RING_TX_MERGE_EN_REG	0x80054
#define HCLGE_RING_TX_TAIL_REG		0x80058
#define HCLGE_RING_TX_HEAD_REG		0x8005C
#define HCLGE_RING_TX_FBD_NUM_REG	0x80060
#define HCLGE_RING_TX_OFFSET_REG	0x80064
#define HCLGE_RING_TX_EBD_NUM_REG	0x80068
#define HCLGE_RING_TX_EBD_OFFSET_REG	0x80070
#define HCLGE_RING_TX_BD_ERR_REG	0x80074
#define HCLGE_RING_EN_REG		0x80090

/* bar registers for tqp interrupt */
#define HCLGE_TQP_INTR_CTRL_REG		0x20000
#define HCLGE_TQP_INTR_GL0_REG		0x20100
#define HCLGE_TQP_INTR_GL1_REG		0x20200
#define HCLGE_TQP_INTR_GL2_REG		0x20300
#define HCLGE_TQP_INTR_RL_REG		0x20900

#define HCLGE_RSS_IND_TBL_SIZE		512
#define HCLGE_RSS_SET_BITMAP_MSK	GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE		40
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ	0
#define HCLGE_RSS_HASH_ALGO_SIMPLE	1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC	2
#define HCLGE_RSS_HASH_ALGO_MASK	GENMASK(3, 0)
#define HCLGE_RSS_CFG_TBL_NUM \
	(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)

#define HCLGE_RSS_INPUT_TUPLE_OTHER	GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP	GENMASK(4, 0)
#define HCLGE_D_PORT_BIT		BIT(0)
#define HCLGE_S_PORT_BIT		BIT(1)
#define HCLGE_D_IP_BIT			BIT(2)
#define HCLGE_S_IP_BIT			BIT(3)
#define HCLGE_V_TAG_BIT			BIT(4)

#define HCLGE_RSS_TC_SIZE_0		1
#define HCLGE_RSS_TC_SIZE_1		2
#define HCLGE_RSS_TC_SIZE_2		4
#define HCLGE_RSS_TC_SIZE_3		8
#define HCLGE_RSS_TC_SIZE_4		16
#define HCLGE_RSS_TC_SIZE_5		32
#define HCLGE_RSS_TC_SIZE_6		64
#define HCLGE_RSS_TC_SIZE_7		128

#define HCLGE_UMV_TBL_SIZE		3072
#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
	(HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)

#define HCLGE_TQP_RESET_TRY_TIMES	10

#define HCLGE_PHY_PAGE_MDIX		0
#define HCLGE_PHY_PAGE_COPPER		0

/* Page Selection Reg. */
#define HCLGE_PHY_PAGE_REG		22

/* Copper Specific Control Register */
#define HCLGE_PHY_CSC_REG		16

/* Copper Specific Status Register */
#define HCLGE_PHY_CSS_REG		17

#define HCLGE_PHY_MDIX_CTRL_S		5
#define HCLGE_PHY_MDIX_CTRL_M		GENMASK(6, 5)

#define HCLGE_PHY_MDIX_STATUS_B		6
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B	11

/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD		64
#define HCLGE_VF_NUM_PER_BYTE		8

enum HLCGE_PORT_TYPE {
	HOST_PORT,
	NETWORK_PORT
};

#define HCLGE_PF_ID_S			0
#define HCLGE_PF_ID_M			GENMASK(2, 0)
#define HCLGE_VF_ID_S			3
#define HCLGE_VF_ID_M			GENMASK(10, 3)
#define HCLGE_PORT_TYPE_B		11
#define HCLGE_NETWORK_PORT_ID_S		0
#define HCLGE_NETWORK_PORT_ID_M		GENMASK(3, 0)

/* Reset related Registers */
#define HCLGE_PF_OTHER_INT_REG		0x20600
#define HCLGE_MISC_RESET_STS_REG	0x20700
#define HCLGE_MISC_VECTOR_INT_STS	0x20800
#define HCLGE_GLOBAL_RESET_REG		0x20A00
#define HCLGE_GLOBAL_RESET_BIT		0
#define HCLGE_CORE_RESET_BIT		1
#define HCLGE_IMP_RESET_BIT		2
#define HCLGE_FUN_RST_ING		0x20C00
#define HCLGE_FUN_RST_ING_B		0

/* Vector0 register bits define */
#define HCLGE_VECTOR0_GLOBALRESET_INT_B	5
#define HCLGE_VECTOR0_CORERESET_INT_B	6
#define HCLGE_VECTOR0_IMPRESET_INT_B	7

/* Vector0 interrupt CMDQ event source register (RW) */
#define HCLGE_VECTOR0_CMDQ_SRC_REG	0x27100
/* CMDQ register bits for RX event (= MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B	1

#define HCLGE_VECTOR0_IMP_RESET_INT_B	1

#define HCLGE_MAC_DEFAULT_FRAME \
	(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME		64
#define HCLGE_MAC_MAX_FRAME		9728
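
/* With the standard kernel values (ETH_HLEN = 14, ETH_FCS_LEN = 4,
 * VLAN_HLEN = 4, ETH_DATA_LEN = 1500), HCLGE_MAC_DEFAULT_FRAME works out
 * to 14 + 4 + 2 * 4 + 1500 = 1526 bytes, i.e. a standard-MTU frame with
 * room for two VLAN tags plus the FCS. HCLGE_MAC_MIN_FRAME and
 * HCLGE_MAC_MAX_FRAME bound the configurable frame size in bytes.
 */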

#define HCLGE_SUPPORT_1G_BIT		BIT(0)
#define HCLGE_SUPPORT_10G_BIT		BIT(1)
#define HCLGE_SUPPORT_25G_BIT		BIT(2)
#define HCLGE_SUPPORT_50G_BIT		BIT(3)
#define HCLGE_SUPPORT_100G_BIT		BIT(4)

enum HCLGE_DEV_STATE {
	HCLGE_STATE_REINITING,
	HCLGE_STATE_DOWN,
	HCLGE_STATE_DISABLED,
	HCLGE_STATE_REMOVING,
	HCLGE_STATE_SERVICE_INITED,
	HCLGE_STATE_SERVICE_SCHED,
	HCLGE_STATE_RST_SERVICE_SCHED,
	HCLGE_STATE_RST_HANDLING,
	HCLGE_STATE_MBX_SERVICE_SCHED,
	HCLGE_STATE_MBX_HANDLING,
	HCLGE_STATE_STATISTICS_UPDATING,
	HCLGE_STATE_CMD_DISABLE,
	HCLGE_STATE_MAX
};

enum hclge_evt_cause {
	HCLGE_VECTOR0_EVENT_RST,
	HCLGE_VECTOR0_EVENT_MBX,
	HCLGE_VECTOR0_EVENT_ERR,
	HCLGE_VECTOR0_EVENT_OTHER,
};

#define HCLGE_MPF_ENBALE	1

enum HCLGE_MAC_SPEED {
	HCLGE_MAC_SPEED_UNKNOWN = 0,	/* unknown */
	HCLGE_MAC_SPEED_10M = 10,	/* 10 Mbps */
	HCLGE_MAC_SPEED_100M = 100,	/* 100 Mbps */
	HCLGE_MAC_SPEED_1G = 1000,	/* 1000 Mbps = 1 Gbps */
	HCLGE_MAC_SPEED_10G = 10000,	/* 10000 Mbps = 10 Gbps */
	HCLGE_MAC_SPEED_25G = 25000,	/* 25000 Mbps = 25 Gbps */
	HCLGE_MAC_SPEED_40G = 40000,	/* 40000 Mbps = 40 Gbps */
	HCLGE_MAC_SPEED_50G = 50000,	/* 50000 Mbps = 50 Gbps */
	HCLGE_MAC_SPEED_100G = 100000	/* 100000 Mbps = 100 Gbps */
};

enum HCLGE_MAC_DUPLEX {
	HCLGE_MAC_HALF,
	HCLGE_MAC_FULL
};

struct hclge_mac {
	u8 phy_addr;
	u8 flag;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 autoneg;
	u8 duplex;
	u32 speed;
	int link;	/* store the link status of mac & phy (if phy exists) */
	struct phy_device *phydev;
	struct mii_bus *mdio_bus;
	phy_interface_t phy_if;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
};

struct hclge_hw {
	void __iomem *io_base;
	struct hclge_mac mac;
	int num_vec;
	struct hclge_cmq cmq;
};

/* TQP stats */
struct hlcge_tqp_stats {
	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};

struct hclge_tqp {
	/* copy of device pointer from pci_dev,
	 * used when performing DMA mapping
	 */
	struct device *dev;
	struct hnae3_queue q;
	struct hlcge_tqp_stats tqp_stats;
	u16 index;	/* Global index in a NIC controller */

	bool alloced;
};

enum hclge_fc_mode {
	HCLGE_FC_NONE,
	HCLGE_FC_RX_PAUSE,
	HCLGE_FC_TX_PAUSE,
	HCLGE_FC_FULL,
	HCLGE_FC_PFC,
	HCLGE_FC_DEFAULT
};

#define HCLGE_PG_NUM		4
#define HCLGE_SCH_MODE_SP	0
#define HCLGE_SCH_MODE_DWRR	1
struct hclge_pg_info {
	u8 pg_id;
	u8 pg_sch_mode;		/* 0: sp; 1: dwrr */
	u8 tc_bit_map;
	u32 bw_limit;
	u8 tc_dwrr[HNAE3_MAX_TC];
};

struct hclge_tc_info {
	u8 tc_id;
	u8 tc_sch_mode;		/* 0: sp; 1: dwrr */
	u8 pgid;
	u32 bw_limit;
};

struct hclge_cfg {
	u8 vmdq_vport_num;
	u8 tc_num;
	u16 tqp_desc_num;
	u16 rx_buf_len;
	u16 rss_size_max;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 default_speed;
	u32 numa_node_map;
	u8 speed_ability;
	u16 umv_space;
};

struct hclge_tm_info {
	u8 num_tc;
	u8 num_pg;	/* It must be 1 when vNET-based scheduling is used */
	u8 pg_dwrr[HCLGE_PG_NUM];
	u8 prio_tc[HNAE3_MAX_USER_PRIO];
	struct hclge_pg_info pg_info[HCLGE_PG_NUM];
	struct hclge_tc_info tc_info[HNAE3_MAX_TC];
	enum hclge_fc_mode fc_mode;
	u8 hw_pfc_map;	/* Allow for packet drop or not on this TC */
};

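/* A minimal usage sketch (not part of this header): "desc" holds the
 * ethtool statistics string and "offset" the byte offset of the matching
 * counter inside a stats structure, so tables of these entries are
 * typically built with offsetof(), e.g.
 *
 *	{ "mac_tx_mac_pause_num",
 *	  offsetof(struct hclge_mac_stats, mac_tx_mac_pause_num) }
 *
 * The actual table construction lives in the .c files; the entry above is
 * only illustrative.
 */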
struct hclge_comm_stats_str {
	char desc[ETH_GSTRING_LEN];
	unsigned long offset;
};

/* mac stats, opcode id: 0x0032 */
struct hclge_mac_stats {
	u64 mac_tx_mac_pause_num;
	u64 mac_rx_mac_pause_num;
	u64 mac_tx_pfc_pri0_pkt_num;
	u64 mac_tx_pfc_pri1_pkt_num;
	u64 mac_tx_pfc_pri2_pkt_num;
	u64 mac_tx_pfc_pri3_pkt_num;
	u64 mac_tx_pfc_pri4_pkt_num;
	u64 mac_tx_pfc_pri5_pkt_num;
	u64 mac_tx_pfc_pri6_pkt_num;
	u64 mac_tx_pfc_pri7_pkt_num;
	u64 mac_rx_pfc_pri0_pkt_num;
	u64 mac_rx_pfc_pri1_pkt_num;
	u64 mac_rx_pfc_pri2_pkt_num;
	u64 mac_rx_pfc_pri3_pkt_num;
	u64 mac_rx_pfc_pri4_pkt_num;
	u64 mac_rx_pfc_pri5_pkt_num;
	u64 mac_rx_pfc_pri6_pkt_num;
	u64 mac_rx_pfc_pri7_pkt_num;
	u64 mac_tx_total_pkt_num;
	u64 mac_tx_total_oct_num;
	u64 mac_tx_good_pkt_num;
	u64 mac_tx_bad_pkt_num;
	u64 mac_tx_good_oct_num;
	u64 mac_tx_bad_oct_num;
	u64 mac_tx_uni_pkt_num;
	u64 mac_tx_multi_pkt_num;
	u64 mac_tx_broad_pkt_num;
	u64 mac_tx_undersize_pkt_num;
	u64 mac_tx_oversize_pkt_num;
	u64 mac_tx_64_oct_pkt_num;
	u64 mac_tx_65_127_oct_pkt_num;
	u64 mac_tx_128_255_oct_pkt_num;
	u64 mac_tx_256_511_oct_pkt_num;
	u64 mac_tx_512_1023_oct_pkt_num;
	u64 mac_tx_1024_1518_oct_pkt_num;
	u64 mac_tx_1519_2047_oct_pkt_num;
	u64 mac_tx_2048_4095_oct_pkt_num;
	u64 mac_tx_4096_8191_oct_pkt_num;
	u64 rsv0;
	u64 mac_tx_8192_9216_oct_pkt_num;
	u64 mac_tx_9217_12287_oct_pkt_num;
	u64 mac_tx_12288_16383_oct_pkt_num;
	u64 mac_tx_1519_max_good_oct_pkt_num;
	u64 mac_tx_1519_max_bad_oct_pkt_num;

	u64 mac_rx_total_pkt_num;
	u64 mac_rx_total_oct_num;
	u64 mac_rx_good_pkt_num;
	u64 mac_rx_bad_pkt_num;
	u64 mac_rx_good_oct_num;
	u64 mac_rx_bad_oct_num;
	u64 mac_rx_uni_pkt_num;
	u64 mac_rx_multi_pkt_num;
	u64 mac_rx_broad_pkt_num;
	u64 mac_rx_undersize_pkt_num;
	u64 mac_rx_oversize_pkt_num;
	u64 mac_rx_64_oct_pkt_num;
	u64 mac_rx_65_127_oct_pkt_num;
	u64 mac_rx_128_255_oct_pkt_num;
	u64 mac_rx_256_511_oct_pkt_num;
	u64 mac_rx_512_1023_oct_pkt_num;
	u64 mac_rx_1024_1518_oct_pkt_num;
	u64 mac_rx_1519_2047_oct_pkt_num;
	u64 mac_rx_2048_4095_oct_pkt_num;
	u64 mac_rx_4096_8191_oct_pkt_num;
	u64 rsv1;
	u64 mac_rx_8192_9216_oct_pkt_num;
	u64 mac_rx_9217_12287_oct_pkt_num;
	u64 mac_rx_12288_16383_oct_pkt_num;
	u64 mac_rx_1519_max_good_oct_pkt_num;
	u64 mac_rx_1519_max_bad_oct_pkt_num;

	u64 mac_tx_fragment_pkt_num;
	u64 mac_tx_undermin_pkt_num;
	u64 mac_tx_jabber_pkt_num;
	u64 mac_tx_err_all_pkt_num;
	u64 mac_tx_from_app_good_pkt_num;
	u64 mac_tx_from_app_bad_pkt_num;
	u64 mac_rx_fragment_pkt_num;
	u64 mac_rx_undermin_pkt_num;
	u64 mac_rx_jabber_pkt_num;
	u64 mac_rx_fcs_err_pkt_num;
	u64 mac_rx_send_app_good_pkt_num;
	u64 mac_rx_send_app_bad_pkt_num;
};

#define HCLGE_STATS_TIMER_INTERVAL	(60 * 5)
struct hclge_hw_stats {
	struct hclge_mac_stats	mac_stats;
	u32 stats_timer;
};

struct hclge_vlan_type_cfg {
	u16 rx_ot_fst_vlan_type;
	u16 rx_ot_sec_vlan_type;
	u16 rx_in_fst_vlan_type;
	u16 rx_in_sec_vlan_type;
	u16 tx_ot_vlan_type;
	u16 tx_in_vlan_type;
};

enum HCLGE_FD_MODE {
	HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
	HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
	HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
	HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
};

enum HCLGE_FD_KEY_TYPE {
	HCLGE_FD_KEY_BASE_ON_PTYPE,
	HCLGE_FD_KEY_BASE_ON_TUPLE,
};

enum HCLGE_FD_STAGE {
	HCLGE_FD_STAGE_1,
	HCLGE_FD_STAGE_2,
};

/* OUTER_XXX indicates tuples in the tunnel header of a tunnel packet.
 * INNER_XXX indicates tuples in the tunneled header of a tunnel packet,
 * or the tuples of a non-tunnel packet.
 */
enum HCLGE_FD_TUPLE {
	OUTER_DST_MAC,
	OUTER_SRC_MAC,
	OUTER_VLAN_TAG_FST,
	OUTER_VLAN_TAG_SEC,
	OUTER_ETH_TYPE,
	OUTER_L2_RSV,
	OUTER_IP_TOS,
	OUTER_IP_PROTO,
	OUTER_SRC_IP,
	OUTER_DST_IP,
	OUTER_L3_RSV,
	OUTER_SRC_PORT,
	OUTER_DST_PORT,
	OUTER_L4_RSV,
	OUTER_TUN_VNI,
	OUTER_TUN_FLOW_ID,
	INNER_DST_MAC,
	INNER_SRC_MAC,
	INNER_VLAN_TAG_FST,
	INNER_VLAN_TAG_SEC,
	INNER_ETH_TYPE,
	INNER_L2_RSV,
	INNER_IP_TOS,
	INNER_IP_PROTO,
	INNER_SRC_IP,
	INNER_DST_IP,
	INNER_L3_RSV,
	INNER_SRC_PORT,
	INNER_DST_PORT,
	INNER_L4_RSV,
	MAX_TUPLE,
};

enum HCLGE_FD_META_DATA {
	PACKET_TYPE_ID,
	IP_FRAGEMENT,
	ROCE_TYPE,
	NEXT_KEY,
	VLAN_NUMBER,
	SRC_VPORT,
	DST_VPORT,
	TUNNEL_PACKET,
	MAX_META_DATA,
};

struct key_info {
	u8 key_type;
	u8 key_length;
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_L2_RSV, 16},
	{ OUTER_IP_TOS, 8},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_IP, 32},
	{ OUTER_DST_IP, 32},
	{ OUTER_L3_RSV, 16},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_L4_RSV, 32},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_L2_RSV, 16},
	{ INNER_IP_TOS, 8},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_IP, 32},
	{ INNER_DST_IP, 32},
	{ INNER_L3_RSV, 16},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
	{ INNER_L4_RSV, 32},
};

#define MAX_KEY_LENGTH		400
#define MAX_KEY_DWORDS		DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
#define MAX_KEY_BYTES		(MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH	32
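
/* MAX_KEY_LENGTH is in bits: 400 bits = 50 bytes, which DIV_ROUND_UP()
 * packs into 13 double words, so MAX_KEY_BYTES works out to 52 bytes of
 * key buffer.
 */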

enum HCLGE_FD_PACKET_TYPE {
	NIC_PACKET,
	ROCE_PACKET,
};

enum HCLGE_FD_ACTION {
	HCLGE_FD_ACTION_ACCEPT_PACKET,
	HCLGE_FD_ACTION_DROP_PACKET,
};

struct hclge_fd_key_cfg {
	u8 key_sel;
	u8 inner_sipv6_word_en;
	u8 inner_dipv6_word_en;
	u8 outer_sipv6_word_en;
	u8 outer_dipv6_word_en;
	u32 tuple_active;
	u32 meta_data_active;
};

struct hclge_fd_cfg {
	u8 fd_mode;
	u8 fd_en;
	u16 max_key_length;
	u32 proto_support;
	u32 rule_num[2]; /* rule entry number */
	u16 cnt_num[2]; /* rule hit counter number */
	struct hclge_fd_key_cfg key_cfg[2];
};

struct hclge_fd_rule_tuples {
	u8 src_mac[6];
	u8 dst_mac[6];
	u32 src_ip[4];
	u32 dst_ip[4];
	u16 src_port;
	u16 dst_port;
	u16 vlan_tag1;
	u16 ether_proto;
	u8 ip_tos;
	u8 ip_proto;
};

struct hclge_fd_rule {
	struct hlist_node rule_node;
	struct hclge_fd_rule_tuples tuples;
	struct hclge_fd_rule_tuples tuples_mask;
	u32 unused_tuple;
	u32 flow_type;
	u8 action;
	u16 vf_id;
	u16 queue_id;
	u16 location;
};

struct hclge_fd_ad_data {
	u16 ad_id;
	u8 drop_packet;
	u8 forward_to_direct_queue;
	u16 queue_id;
	u8 use_counter;
	u8 counter_id;
	u8 use_next_stage;
	u8 write_rule_id_to_bd;
	u8 next_input_key;
	u16 rule_id;
};

/* For each bit of a TCAM entry, a pair of 'x' and 'y' bits indicates
 * which value to match, as below:
 * ----------------------------------
 * | bit x | bit y |  search value  |
 * ----------------------------------
 * |   0   |   0   |   always hit   |
 * ----------------------------------
 * |   1   |   0   |   match '0'    |
 * ----------------------------------
 * |   0   |   1   |   match '1'    |
 * ----------------------------------
 * |   1   |   1   |    invalid     |
 * ----------------------------------
 * Then, for an input key (k) and mask (v), the x and y values are
 * calculated with the formulas:
 *	x = (~k) & v
 *	y = (k ^ ~v) & k
 */
#define calc_x(x, k, v) ((x) = (~(k) & (v)))
#define calc_y(y, k, v) \
	do { \
		const typeof(k) _k_ = (k); \
		const typeof(v) _v_ = (v); \
		(y) = (_k_ ^ ~_v_) & (_k_); \
	} while (0)
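
/* A small worked example of the encoding above (illustrative values only,
 * treating the operands as 4 bits wide for brevity). With key k = 0b1010
 * and mask v = 0b1100, where a set mask bit means "this key bit must match":
 *
 *	calc_x(x, k, v);	x = (~0b1010) & 0b1100          = 0b0100
 *	calc_y(y, k, v);	y = (0b1010 ^ ~0b1100) & 0b1010 = 0b1000
 *
 * Reading the (x, y) pairs bit by bit: bit 3 is (0, 1) -> match '1',
 * bit 2 is (1, 0) -> match '0', and bits 1..0 are (0, 0) -> always hit,
 * which is exactly the masked-in key 0b10xx.
 */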

#define HCLGE_VPORT_NUM 256
struct hclge_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclge_hw hw;
	struct hclge_misc_vector misc_vector;
	struct hclge_hw_stats hw_stats;
	unsigned long state;
	unsigned long flr_state;
	unsigned long last_reset_time;

	enum hnae3_reset_type reset_type;
	enum hnae3_reset_type reset_level;
	unsigned long default_reset_request;
	unsigned long reset_request;	/* reset has been requested */
	unsigned long reset_pending;	/* client rst is pending to be served */
	unsigned long reset_count;	/* the number of resets that have been done */
	u32 reset_fail_cnt;
	u32 fw_version;
	u16 num_vmdq_vport;		/* Num vmdq vports this PF has set up */
	u16 num_tqps;			/* Num task queue pairs of this PF */
	u16 num_req_vfs;		/* Num VFs requested for this PF */

	u16 base_tqp_pid;	/* Base task tqp physical id of this PF */
	u16 alloc_rss_size;		/* Allocated RSS task queue */
	u16 rss_size_max;		/* HW defined max RSS task queue */

	u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
	u16 num_alloc_vport;		/* Num vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_desc;
	u8 hw_tc_map;
	u8 tc_num_last_time;
	enum hclge_fc_mode fc_mode_last_time;
	u8 support_sfp_query;

#define HCLGE_FLAG_TC_BASE_SCH_MODE		1
#define HCLGE_FLAG_VNET_BASE_SCH_MODE		2
	u8 tx_sch_mode;
	u8 tc_max;
	u8 pfc_max;

	u8 default_up;
	u8 dcbx_cap;
	struct hclge_tm_info tm_info;

	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u16 roce_base_msix_offset;
	u32 base_msi_vector;
	u16 *vector_status;
	int *vector_irq;
	u16 num_roce_msi;	/* Num of roce vectors for this PF */
	int roce_base_vector;

	u16 pending_udp_bitmap;

	u16 rx_itr_default;
	u16 tx_itr_default;

	u16 adminq_work_limit; /* Num of admin receive queue desc to process */
	unsigned long service_timer_period;
	unsigned long service_timer_previous;
	struct timer_list service_timer;
	struct timer_list reset_timer;
	struct work_struct service_task;
	struct work_struct rst_service_task;
	struct work_struct mbx_service_task;

	bool cur_promisc;
	int num_alloc_vfs;	/* Actual number of VFs allocated */

	struct hclge_tqp *htqp;
	struct hclge_vport *vport;

	struct dentry *hclge_dbgfs;

	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;

#define HCLGE_FLAG_MAIN			BIT(0)
#define HCLGE_FLAG_DCB_CAPABLE		BIT(1)
#define HCLGE_FLAG_DCB_ENABLE		BIT(2)
#define HCLGE_FLAG_MQPRIO_ENABLE	BIT(3)
	u32 flag;

	u32 pkt_buf_size; /* Total pf buf size for tx/rx */
	u32 tx_buf_size; /* Tx buffer size for each TC */
	u32 dv_buf_size; /* Dv buffer size for each TC */

	u32 mps; /* Max packet size */
	/* vport_lock protects resources shared by vports */
	struct mutex vport_lock;

	struct hclge_vlan_type_cfg vlan_type_cfg;

	unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];

	struct hclge_fd_cfg fd_cfg;
	struct hlist_head fd_rule_list;
	u16 hclge_fd_rule_num;

	u16 wanted_umv_size;
	/* max available unicast mac vlan space */
	u16 max_umv_size;
	/* private unicast mac vlan space, the same for the PF and its VFs */
	u16 priv_umv_size;
	/* unicast mac vlan space shared by PF and its VFs */
	u16 share_umv_size;
	struct mutex umv_mutex; /* protect share_umv_size */
};

/* VPort level vlan tag configuration for TX direction */
struct hclge_tx_vtag_cfg {
	bool accept_tag1;	/* Whether accept tag1 packet from host */
	bool accept_untag1;	/* Whether accept untag1 packet from host */
	bool accept_tag2;
	bool accept_untag2;
	bool insert_tag1_en;	/* Whether insert inner vlan tag */
	bool insert_tag2_en;	/* Whether insert outer vlan tag */
	u16  default_tag1;	/* The default inner vlan tag to insert */
	u16  default_tag2;	/* The default outer vlan tag to insert */
};

/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
	bool strip_tag1_en;	/* Whether strip inner vlan tag */
	bool strip_tag2_en;	/* Whether strip outer vlan tag */
	bool vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */
	bool vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */
};

struct hclge_rss_tuple_cfg {
	u8 ipv4_tcp_en;
	u8 ipv4_udp_en;
	u8 ipv4_sctp_en;
	u8 ipv4_fragment_en;
	u8 ipv6_tcp_en;
	u8 ipv6_udp_en;
	u8 ipv6_sctp_en;
	u8 ipv6_fragment_en;
};

enum HCLGE_VPORT_STATE {
	HCLGE_VPORT_STATE_ALIVE,
	HCLGE_VPORT_STATE_MAX
};

struct hclge_vport {
	u16 alloc_tqps;	/* Allocated Tx/Rx queues */

	u8  rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
	/* User configured lookup table entries */
	u8  rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
	int rss_algo;		/* User configured hash algorithm */
	/* User configured rss tuple sets */
	struct hclge_rss_tuple_cfg rss_tuple_sets;

	u16 alloc_rss_size;

	u16 qs_offset;
	u16 bw_limit;		/* VSI BW Limit (0 = disabled) */
	u8  dwrr;

	struct hclge_tx_vtag_cfg  txvlan_cfg;
	struct hclge_rx_vtag_cfg  rxvlan_cfg;

	u16 used_umv_num;

	int vport_id;
	struct hclge_dev *back;  /* Back reference to associated dev */
	struct hnae3_handle nic;
	struct hnae3_handle roce;

	unsigned long state;
	unsigned long last_active_jiffies;
	u32 mps; /* Max packet size */
};

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id);

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);
int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain);

static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
	struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
{
	return !!hdev->reset_pending;
}

int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);

int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);

void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
#endif