// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#ifndef __HCLGE_MAIN_H
#define __HCLGE_MAIN_H
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>

#include "hclge_cmd.h"
#include "hnae3.h"

#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"

#define HCLGE_MAX_PF_NUM 8

#define HCLGE_INVALID_VPORT 0xffff

#define HCLGE_PF_CFG_BLOCK_SIZE 32
#define HCLGE_PF_CFG_DESC_NUM \
	(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)

#define HCLGE_VECTOR_REG_BASE 0x20000
#define HCLGE_MISC_VECTOR_REG_BASE 0x20400

#define HCLGE_VECTOR_REG_OFFSET 0x4
#define HCLGE_VECTOR_VF_OFFSET 0x100000

#define HCLGE_RSS_IND_TBL_SIZE 512
#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE 40
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
#define HCLGE_RSS_CFG_TBL_NUM \
	(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)

#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
#define HCLGE_D_PORT_BIT BIT(0)
#define HCLGE_S_PORT_BIT BIT(1)
#define HCLGE_D_IP_BIT BIT(2)
#define HCLGE_S_IP_BIT BIT(3)
#define HCLGE_V_TAG_BIT BIT(4)

#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
#define HCLGE_RSS_TC_SIZE_2 4
#define HCLGE_RSS_TC_SIZE_3 8
#define HCLGE_RSS_TC_SIZE_4 16
#define HCLGE_RSS_TC_SIZE_5 32
#define HCLGE_RSS_TC_SIZE_6 64
#define HCLGE_RSS_TC_SIZE_7 128

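/* By default the 3072-entry unicast MAC-VLAN (UMV) table is divided
 * evenly among the 8 PFs, i.e. 384 guaranteed entries per PF.
 */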
#define HCLGE_UMV_TBL_SIZE 3072
#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
	(HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)

#define HCLGE_TQP_RESET_TRY_TIMES 10

#define HCLGE_PHY_PAGE_MDIX 0
#define HCLGE_PHY_PAGE_COPPER 0

/* Page Selection Reg. */
#define HCLGE_PHY_PAGE_REG 22

/* Copper Specific Control Register */
#define HCLGE_PHY_CSC_REG 16

/* Copper Specific Status Register */
#define HCLGE_PHY_CSS_REG 17

#define HCLGE_PHY_MDIX_CTRL_S 5
#define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5)

#define HCLGE_PHY_MDIX_STATUS_B 6
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11

/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
#define HCLGE_VF_NUM_PER_BYTE 8

enum HLCGE_PORT_TYPE {
	HOST_PORT,
	NETWORK_PORT
};

#define HCLGE_PF_ID_S 0
#define HCLGE_PF_ID_M GENMASK(2, 0)
#define HCLGE_VF_ID_S 3
#define HCLGE_VF_ID_M GENMASK(10, 3)
#define HCLGE_PORT_TYPE_B 11
#define HCLGE_NETWORK_PORT_ID_S 0
#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)

/* Reset related Registers */
#define HCLGE_PF_OTHER_INT_REG 0x20600
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_MISC_VECTOR_INT_STS 0x20800
#define HCLGE_GLOBAL_RESET_REG 0x20A00
#define HCLGE_GLOBAL_RESET_BIT 0
#define HCLGE_CORE_RESET_BIT 1
#define HCLGE_IMP_RESET_BIT 2
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0

/* Vector0 register bits define */
#define HCLGE_VECTOR0_GLOBALRESET_INT_B 5
#define HCLGE_VECTOR0_CORERESET_INT_B 6
#define HCLGE_VECTOR0_IMPRESET_INT_B 7

/* Vector0 interrupt CMDQ event source register (RW) */
#define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100
/* CMDQ register bits for RX event (= MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1

#define HCLGE_VECTOR0_IMP_RESET_INT_B 1

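/* Default frame size: 14-byte Ethernet header + 4-byte FCS + two 4-byte
 * VLAN tags + 1500-byte payload = 1526 bytes.
 */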
#define HCLGE_MAC_DEFAULT_FRAME \
	(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME 64
#define HCLGE_MAC_MAX_FRAME 9728

#define HCLGE_SUPPORT_1G_BIT BIT(0)
#define HCLGE_SUPPORT_10G_BIT BIT(1)
#define HCLGE_SUPPORT_25G_BIT BIT(2)
#define HCLGE_SUPPORT_50G_BIT BIT(3)
#define HCLGE_SUPPORT_100G_BIT BIT(4)

enum HCLGE_DEV_STATE {
	HCLGE_STATE_REINITING,
	HCLGE_STATE_DOWN,
	HCLGE_STATE_DISABLED,
	HCLGE_STATE_REMOVING,
	HCLGE_STATE_SERVICE_INITED,
	HCLGE_STATE_SERVICE_SCHED,
	HCLGE_STATE_RST_SERVICE_SCHED,
	HCLGE_STATE_RST_HANDLING,
	HCLGE_STATE_MBX_SERVICE_SCHED,
	HCLGE_STATE_MBX_HANDLING,
	HCLGE_STATE_STATISTICS_UPDATING,
	HCLGE_STATE_CMD_DISABLE,
	HCLGE_STATE_MAX
};

enum hclge_evt_cause {
	HCLGE_VECTOR0_EVENT_RST,
	HCLGE_VECTOR0_EVENT_MBX,
	HCLGE_VECTOR0_EVENT_OTHER,
};

#define HCLGE_MPF_ENBALE 1

enum HCLGE_MAC_SPEED {
	HCLGE_MAC_SPEED_10M = 10,	/* 10 Mbps */
	HCLGE_MAC_SPEED_100M = 100,	/* 100 Mbps */
	HCLGE_MAC_SPEED_1G = 1000,	/* 1000 Mbps = 1 Gbps */
	HCLGE_MAC_SPEED_10G = 10000,	/* 10000 Mbps = 10 Gbps */
	HCLGE_MAC_SPEED_25G = 25000,	/* 25000 Mbps = 25 Gbps */
	HCLGE_MAC_SPEED_40G = 40000,	/* 40000 Mbps = 40 Gbps */
	HCLGE_MAC_SPEED_50G = 50000,	/* 50000 Mbps = 50 Gbps */
	HCLGE_MAC_SPEED_100G = 100000	/* 100000 Mbps = 100 Gbps */
};

enum HCLGE_MAC_DUPLEX {
	HCLGE_MAC_HALF,
	HCLGE_MAC_FULL
};

struct hclge_mac {
	u8 phy_addr;
	u8 flag;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 autoneg;
	u8 duplex;
	u32 speed;
	int link;	/* store the link status of mac & phy (if phy exists) */
	struct phy_device *phydev;
	struct mii_bus *mdio_bus;
	phy_interface_t phy_if;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
};

struct hclge_hw {
	void __iomem *io_base;
	struct hclge_mac mac;
	int num_vec;
	struct hclge_cmq cmq;
};

/* TQP stats */
struct hlcge_tqp_stats {
	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};

struct hclge_tqp {
	/* copy of device pointer from pci_dev,
	 * used when performing DMA mapping
	 */
	struct device *dev;
	struct hnae3_queue q;
	struct hlcge_tqp_stats tqp_stats;
	u16 index;	/* Global index in a NIC controller */

	bool alloced;
};

enum hclge_fc_mode {
	HCLGE_FC_NONE,
	HCLGE_FC_RX_PAUSE,
	HCLGE_FC_TX_PAUSE,
	HCLGE_FC_FULL,
	HCLGE_FC_PFC,
	HCLGE_FC_DEFAULT
};

#define HCLGE_PG_NUM 4
#define HCLGE_SCH_MODE_SP 0
#define HCLGE_SCH_MODE_DWRR 1
struct hclge_pg_info {
	u8 pg_id;
	u8 pg_sch_mode;	/* 0: sp; 1: dwrr */
	u8 tc_bit_map;
	u32 bw_limit;
	u8 tc_dwrr[HNAE3_MAX_TC];
};

struct hclge_tc_info {
	u8 tc_id;
	u8 tc_sch_mode;	/* 0: sp; 1: dwrr */
	u8 pgid;
	u32 bw_limit;
};

struct hclge_cfg {
	u8 vmdq_vport_num;
	u8 tc_num;
	u16 tqp_desc_num;
	u16 rx_buf_len;
	u16 rss_size_max;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 default_speed;
	u32 numa_node_map;
	u8 speed_ability;
	u16 umv_space;
};

struct hclge_tm_info {
	u8 num_tc;
	u8 num_pg;	/* Must be 1 if vNET-Based scheduling is used */
	u8 pg_dwrr[HCLGE_PG_NUM];
	u8 prio_tc[HNAE3_MAX_USER_PRIO];
	struct hclge_pg_info pg_info[HCLGE_PG_NUM];
	struct hclge_tc_info tc_info[HNAE3_MAX_TC];
	enum hclge_fc_mode fc_mode;
	u8 hw_pfc_map;	/* Allow for packet drop or not on this TC */
};

struct hclge_comm_stats_str {
	char desc[ETH_GSTRING_LEN];
	unsigned long offset;
};

/* mac stats, opcode id: 0x0032 */
struct hclge_mac_stats {
	u64 mac_tx_mac_pause_num;
	u64 mac_rx_mac_pause_num;
	u64 mac_tx_pfc_pri0_pkt_num;
	u64 mac_tx_pfc_pri1_pkt_num;
	u64 mac_tx_pfc_pri2_pkt_num;
	u64 mac_tx_pfc_pri3_pkt_num;
	u64 mac_tx_pfc_pri4_pkt_num;
	u64 mac_tx_pfc_pri5_pkt_num;
	u64 mac_tx_pfc_pri6_pkt_num;
	u64 mac_tx_pfc_pri7_pkt_num;
	u64 mac_rx_pfc_pri0_pkt_num;
	u64 mac_rx_pfc_pri1_pkt_num;
	u64 mac_rx_pfc_pri2_pkt_num;
	u64 mac_rx_pfc_pri3_pkt_num;
	u64 mac_rx_pfc_pri4_pkt_num;
	u64 mac_rx_pfc_pri5_pkt_num;
	u64 mac_rx_pfc_pri6_pkt_num;
	u64 mac_rx_pfc_pri7_pkt_num;
	u64 mac_tx_total_pkt_num;
	u64 mac_tx_total_oct_num;
	u64 mac_tx_good_pkt_num;
	u64 mac_tx_bad_pkt_num;
	u64 mac_tx_good_oct_num;
	u64 mac_tx_bad_oct_num;
	u64 mac_tx_uni_pkt_num;
	u64 mac_tx_multi_pkt_num;
	u64 mac_tx_broad_pkt_num;
	u64 mac_tx_undersize_pkt_num;
	u64 mac_tx_oversize_pkt_num;
	u64 mac_tx_64_oct_pkt_num;
	u64 mac_tx_65_127_oct_pkt_num;
	u64 mac_tx_128_255_oct_pkt_num;
	u64 mac_tx_256_511_oct_pkt_num;
	u64 mac_tx_512_1023_oct_pkt_num;
	u64 mac_tx_1024_1518_oct_pkt_num;
	u64 mac_tx_1519_2047_oct_pkt_num;
	u64 mac_tx_2048_4095_oct_pkt_num;
	u64 mac_tx_4096_8191_oct_pkt_num;
	u64 rsv0;
	u64 mac_tx_8192_9216_oct_pkt_num;
	u64 mac_tx_9217_12287_oct_pkt_num;
	u64 mac_tx_12288_16383_oct_pkt_num;
	u64 mac_tx_1519_max_good_oct_pkt_num;
	u64 mac_tx_1519_max_bad_oct_pkt_num;

	u64 mac_rx_total_pkt_num;
	u64 mac_rx_total_oct_num;
	u64 mac_rx_good_pkt_num;
	u64 mac_rx_bad_pkt_num;
	u64 mac_rx_good_oct_num;
	u64 mac_rx_bad_oct_num;
	u64 mac_rx_uni_pkt_num;
	u64 mac_rx_multi_pkt_num;
	u64 mac_rx_broad_pkt_num;
	u64 mac_rx_undersize_pkt_num;
	u64 mac_rx_oversize_pkt_num;
	u64 mac_rx_64_oct_pkt_num;
	u64 mac_rx_65_127_oct_pkt_num;
	u64 mac_rx_128_255_oct_pkt_num;
	u64 mac_rx_256_511_oct_pkt_num;
	u64 mac_rx_512_1023_oct_pkt_num;
	u64 mac_rx_1024_1518_oct_pkt_num;
	u64 mac_rx_1519_2047_oct_pkt_num;
	u64 mac_rx_2048_4095_oct_pkt_num;
	u64 mac_rx_4096_8191_oct_pkt_num;
	u64 rsv1;
	u64 mac_rx_8192_9216_oct_pkt_num;
	u64 mac_rx_9217_12287_oct_pkt_num;
	u64 mac_rx_12288_16383_oct_pkt_num;
	u64 mac_rx_1519_max_good_oct_pkt_num;
	u64 mac_rx_1519_max_bad_oct_pkt_num;

	u64 mac_tx_fragment_pkt_num;
	u64 mac_tx_undermin_pkt_num;
	u64 mac_tx_jabber_pkt_num;
	u64 mac_tx_err_all_pkt_num;
	u64 mac_tx_from_app_good_pkt_num;
	u64 mac_tx_from_app_bad_pkt_num;
	u64 mac_rx_fragment_pkt_num;
	u64 mac_rx_undermin_pkt_num;
	u64 mac_rx_jabber_pkt_num;
	u64 mac_rx_fcs_err_pkt_num;
	u64 mac_rx_send_app_good_pkt_num;
	u64 mac_rx_send_app_bad_pkt_num;
};

#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
struct hclge_hw_stats {
	struct hclge_mac_stats mac_stats;
	u32 stats_timer;
};

struct hclge_vlan_type_cfg {
	u16 rx_ot_fst_vlan_type;
	u16 rx_ot_sec_vlan_type;
	u16 rx_in_fst_vlan_type;
	u16 rx_in_sec_vlan_type;
	u16 tx_ot_vlan_type;
	u16 tx_in_vlan_type;
};

enum HCLGE_FD_MODE {
	HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
	HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
	HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
	HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
};

enum HCLGE_FD_KEY_TYPE {
	HCLGE_FD_KEY_BASE_ON_PTYPE,
	HCLGE_FD_KEY_BASE_ON_TUPLE,
};

enum HCLGE_FD_STAGE {
	HCLGE_FD_STAGE_1,
	HCLGE_FD_STAGE_2,
};

/* OUTER_XXX indicates tuples in the tunnel header of a tunnel packet.
 * INNER_XXX indicates tuples in the inner header of a tunnel packet, or
 * the tuples of a non-tunnel packet.
 */
enum HCLGE_FD_TUPLE {
	OUTER_DST_MAC,
	OUTER_SRC_MAC,
	OUTER_VLAN_TAG_FST,
	OUTER_VLAN_TAG_SEC,
	OUTER_ETH_TYPE,
	OUTER_L2_RSV,
	OUTER_IP_TOS,
	OUTER_IP_PROTO,
	OUTER_SRC_IP,
	OUTER_DST_IP,
	OUTER_L3_RSV,
	OUTER_SRC_PORT,
	OUTER_DST_PORT,
	OUTER_L4_RSV,
	OUTER_TUN_VNI,
	OUTER_TUN_FLOW_ID,
	INNER_DST_MAC,
	INNER_SRC_MAC,
	INNER_VLAN_TAG_FST,
	INNER_VLAN_TAG_SEC,
	INNER_ETH_TYPE,
	INNER_L2_RSV,
	INNER_IP_TOS,
	INNER_IP_PROTO,
	INNER_SRC_IP,
	INNER_DST_IP,
	INNER_L3_RSV,
	INNER_SRC_PORT,
	INNER_DST_PORT,
	INNER_L4_RSV,
	MAX_TUPLE,
};

enum HCLGE_FD_META_DATA {
	PACKET_TYPE_ID,
	IP_FRAGEMENT,
	ROCE_TYPE,
	NEXT_KEY,
	VLAN_NUMBER,
	SRC_VPORT,
	DST_VPORT,
	TUNNEL_PACKET,
	MAX_META_DATA,
};

struct key_info {
	u8 key_type;
	u8 key_length;
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_L2_RSV, 16},
	{ OUTER_IP_TOS, 8},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_IP, 32},
	{ OUTER_DST_IP, 32},
	{ OUTER_L3_RSV, 16},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_L4_RSV, 32},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_L2_RSV, 16},
	{ INNER_IP_TOS, 8},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_IP, 32},
	{ INNER_DST_IP, 32},
	{ INNER_L3_RSV, 16},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
	{ INNER_L4_RSV, 32},
};

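/* The flow director key is at most 400 bits (50 bytes) wide; rounded up
 * to dword granularity this is 13 dwords, i.e. 52 bytes.
 */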
#define MAX_KEY_LENGTH 400
#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH 32

enum HCLGE_FD_PACKET_TYPE {
	NIC_PACKET,
	ROCE_PACKET,
};

enum HCLGE_FD_ACTION {
	HCLGE_FD_ACTION_ACCEPT_PACKET,
	HCLGE_FD_ACTION_DROP_PACKET,
};

struct hclge_fd_key_cfg {
	u8 key_sel;
	u8 inner_sipv6_word_en;
	u8 inner_dipv6_word_en;
	u8 outer_sipv6_word_en;
	u8 outer_dipv6_word_en;
	u32 tuple_active;
	u32 meta_data_active;
};

struct hclge_fd_cfg {
	u8 fd_mode;
	u8 fd_en;
	u16 max_key_length;
	u32 proto_support;
	u32 rule_num[2];	/* rule entry number */
	u16 cnt_num[2];		/* rule hit counter number */
	struct hclge_fd_key_cfg key_cfg[2];
};

struct hclge_fd_rule_tuples {
	u8 src_mac[6];
	u8 dst_mac[6];
	u32 src_ip[4];
	u32 dst_ip[4];
	u16 src_port;
	u16 dst_port;
	u16 vlan_tag1;
	u16 ether_proto;
	u8 ip_tos;
	u8 ip_proto;
};

struct hclge_fd_rule {
	struct hlist_node rule_node;
	struct hclge_fd_rule_tuples tuples;
	struct hclge_fd_rule_tuples tuples_mask;
	u32 unused_tuple;
	u32 flow_type;
	u8 action;
	u16 vf_id;
	u16 queue_id;
	u16 location;
};

struct hclge_fd_ad_data {
	u16 ad_id;
	u8 drop_packet;
	u8 forward_to_direct_queue;
	u16 queue_id;
	u8 use_counter;
	u8 counter_id;
	u8 use_next_stage;
	u8 write_rule_id_to_bd;
	u8 next_input_key;
	u16 rule_id;
};

/* For each bit of a TCAM entry, a pair of 'x' and 'y' bits indicates
 * which value to match, like below:
 * ----------------------------------
 * | bit x | bit y |  search value  |
 * ----------------------------------
 * |   0   |   0   |   always hit   |
 * ----------------------------------
 * |   1   |   0   |   match '0'    |
 * ----------------------------------
 * |   0   |   1   |   match '1'    |
 * ----------------------------------
 * |   1   |   1   |    invalid     |
 * ----------------------------------
 * Then for an input key (k) and mask (v), we can calculate the values by
 * the formulae:
 *	x = (~k) & v
 *	y = (k ^ ~v) & k
 */
#define calc_x(x, k, v) ((x) = (~(k) & (v)))
#define calc_y(y, k, v) \
	do { \
		const typeof(k) _k_ = (k); \
		const typeof(v) _v_ = (v); \
		(y) = (_k_ ^ ~_v_) & (_k_); \
	} while (0)
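/* Illustrative example: for key k = 0b1010 with mask v = 0b1100 (only the
 * two high bits are significant), calc_x() yields x = (~k) & v = 0b0100
 * and calc_y() yields y = (k ^ ~v) & k = 0b1000: bit 3 matches '1',
 * bit 2 matches '0', and bits 1..0 always hit, as in the table above.
 */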
#define HCLGE_VPORT_NUM 256
struct hclge_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclge_hw hw;
	struct hclge_misc_vector misc_vector;
	struct hclge_hw_stats hw_stats;
	unsigned long state;
	unsigned long flr_state;
	unsigned long last_reset_time;

	enum hnae3_reset_type reset_type;
	enum hnae3_reset_type reset_level;
	unsigned long default_reset_request;
	unsigned long reset_request;	/* reset has been requested */
	unsigned long reset_pending;	/* client rst is pending to be served */
	unsigned long reset_count;	/* the number of resets that have been done */
	u32 reset_fail_cnt;
	u32 fw_version;
	u16 num_vmdq_vport;	/* Num vmdq vports this PF has set up */
	u16 num_tqps;		/* Num task queue pairs of this PF */
	u16 num_req_vfs;	/* Num VFs requested for this PF */

	u16 base_tqp_pid;	/* Base task tqp physical id of this PF */
	u16 alloc_rss_size;	/* Allocated RSS task queue */
	u16 rss_size_max;	/* HW defined max RSS task queue */

	u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
	u16 num_alloc_vport;	/* Num vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_desc;
	u8 hw_tc_map;
	u8 tc_num_last_time;
	enum hclge_fc_mode fc_mode_last_time;

#define HCLGE_FLAG_TC_BASE_SCH_MODE 1
#define HCLGE_FLAG_VNET_BASE_SCH_MODE 2
	u8 tx_sch_mode;
	u8 tc_max;
	u8 pfc_max;

	u8 default_up;
	u8 dcbx_cap;
	struct hclge_tm_info tm_info;

	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u16 roce_base_msix_offset;
	u32 base_msi_vector;
	u16 *vector_status;
	int *vector_irq;
	u16 num_roce_msi;	/* Num of roce vectors for this PF */
	int roce_base_vector;

	u16 pending_udp_bitmap;

	u16 rx_itr_default;
	u16 tx_itr_default;

	u16 adminq_work_limit;	/* Num of admin receive queue desc to process */
	unsigned long service_timer_period;
	unsigned long service_timer_previous;
	struct timer_list service_timer;
	struct timer_list reset_timer;
	struct work_struct service_task;
	struct work_struct rst_service_task;
	struct work_struct mbx_service_task;

	bool cur_promisc;
	int num_alloc_vfs;	/* Actual number of VFs allocated */

	struct hclge_tqp *htqp;
	struct hclge_vport *vport;

	struct dentry *hclge_dbgfs;

	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;

#define HCLGE_FLAG_MAIN BIT(0)
#define HCLGE_FLAG_DCB_CAPABLE BIT(1)
#define HCLGE_FLAG_DCB_ENABLE BIT(2)
#define HCLGE_FLAG_MQPRIO_ENABLE BIT(3)
	u32 flag;

	u32 pkt_buf_size;	/* Total pf buf size for tx/rx */
	u32 mps;		/* Max packet size */
	/* vport_lock protects resources shared by vports */
	struct mutex vport_lock;

	struct hclge_vlan_type_cfg vlan_type_cfg;

	unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];

	struct hclge_fd_cfg fd_cfg;
	struct hlist_head fd_rule_list;
	u16 hclge_fd_rule_num;

	u16 wanted_umv_size;
	/* max available unicast mac vlan space */
	u16 max_umv_size;
	/* private unicast mac vlan space, it's the same for PF and its VFs */
	u16 priv_umv_size;
	/* unicast mac vlan space shared by PF and its VFs */
	u16 share_umv_size;
	struct mutex umv_mutex;	/* protect share_umv_size */
};

/* VPort level vlan tag configuration for TX direction */
struct hclge_tx_vtag_cfg {
	bool accept_tag1;	/* Whether to accept tag1 packets from host */
	bool accept_untag1;	/* Whether to accept untag1 packets from host */
	bool accept_tag2;
	bool accept_untag2;
	bool insert_tag1_en;	/* Whether to insert inner vlan tag */
	bool insert_tag2_en;	/* Whether to insert outer vlan tag */
	u16 default_tag1;	/* The default inner vlan tag to insert */
	u16 default_tag2;	/* The default outer vlan tag to insert */
};

/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
	bool strip_tag1_en;	/* Whether to strip inner vlan tag */
	bool strip_tag2_en;	/* Whether to strip outer vlan tag */
	bool vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor enable */
	bool vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor enable */
};

struct hclge_rss_tuple_cfg {
	u8 ipv4_tcp_en;
	u8 ipv4_udp_en;
	u8 ipv4_sctp_en;
	u8 ipv4_fragment_en;
	u8 ipv6_tcp_en;
	u8 ipv6_udp_en;
	u8 ipv6_sctp_en;
	u8 ipv6_fragment_en;
};

enum HCLGE_VPORT_STATE {
	HCLGE_VPORT_STATE_ALIVE,
	HCLGE_VPORT_STATE_MAX
};

struct hclge_vport {
	u16 alloc_tqps;	/* Allocated Tx/Rx queues */

	u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
	/* User configured lookup table entries */
	u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
	int rss_algo;	/* User configured hash algorithm */
	/* User configured rss tuple sets */
	struct hclge_rss_tuple_cfg rss_tuple_sets;

	u16 alloc_rss_size;

	u16 qs_offset;
	u16 bw_limit;	/* VSI BW Limit (0 = disabled) */
	u8 dwrr;

	struct hclge_tx_vtag_cfg txvlan_cfg;
	struct hclge_rx_vtag_cfg rxvlan_cfg;

	u16 used_umv_num;

	int vport_id;
	struct hclge_dev *back;	/* Back reference to associated dev */
	struct hnae3_handle nic;
	struct hnae3_handle roce;

	unsigned long state;
	unsigned long last_active_jiffies;
	u32 mps;	/* Max packet size */
};

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id);

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);
int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain);

static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
	struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
{
	return !!hdev->reset_pending;
}

int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);

int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);

void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
#endif