/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2016-2017 Hisilicon Limited. */

#ifndef __HCLGEVF_MAIN_H
#define __HCLGEVF_MAIN_H
#include <linux/fs.h>
#include <linux/if_vlan.h>
#include <linux/types.h>
#include <net/devlink.h>
#include "hclge_mbx.h"
#include "hclgevf_cmd.h"
#include "hnae3.h"

#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"

#define HCLGEVF_MAX_VLAN_ID			4095
#define HCLGEVF_MISC_VECTOR_NUM			0

#define HCLGEVF_INVALID_VPORT			0xffff
#define HCLGEVF_GENERAL_TASK_INTERVAL		5
#define HCLGEVF_KEEP_ALIVE_TASK_INTERVAL	2

/* The actual number depends upon the total number of VFs created by
 * the physical function, but the maximum number of vectors per VF
 * is {VFn(1-32), VECTn(32 + 1)}.
 */
#define HCLGEVF_MAX_VF_VECTOR_NUM	(32 + 1)

#define HCLGEVF_VECTOR_REG_BASE		0x20000
#define HCLGEVF_MISC_VECTOR_REG_BASE	0x20400
#define HCLGEVF_VECTOR_REG_OFFSET	0x4
#define HCLGEVF_VECTOR_VF_OFFSET	0x100000

/* bar registers for cmdq */
#define HCLGEVF_CMDQ_TX_ADDR_L_REG	0x27000
#define HCLGEVF_CMDQ_TX_ADDR_H_REG	0x27004
#define HCLGEVF_CMDQ_TX_DEPTH_REG	0x27008
#define HCLGEVF_CMDQ_TX_TAIL_REG	0x27010
#define HCLGEVF_CMDQ_TX_HEAD_REG	0x27014
#define HCLGEVF_CMDQ_RX_ADDR_L_REG	0x27018
#define HCLGEVF_CMDQ_RX_ADDR_H_REG	0x2701C
#define HCLGEVF_CMDQ_RX_DEPTH_REG	0x27020
#define HCLGEVF_CMDQ_RX_TAIL_REG	0x27024
#define HCLGEVF_CMDQ_RX_HEAD_REG	0x27028
#define HCLGEVF_CMDQ_INTR_EN_REG	0x27108
#define HCLGEVF_CMDQ_INTR_GEN_REG	0x2710C

/* bar registers for common func */
#define HCLGEVF_GRO_EN_REG		0x28000
#define HCLGEVF_RXD_ADV_LAYOUT_EN_REG	0x28008

/* bar registers for rcb */
#define HCLGEVF_RING_RX_ADDR_L_REG	0x80000
#define HCLGEVF_RING_RX_ADDR_H_REG	0x80004
#define HCLGEVF_RING_RX_BD_NUM_REG	0x80008
#define HCLGEVF_RING_RX_BD_LENGTH_REG	0x8000C
#define HCLGEVF_RING_RX_MERGE_EN_REG	0x80014
#define HCLGEVF_RING_RX_TAIL_REG	0x80018
#define HCLGEVF_RING_RX_HEAD_REG	0x8001C
#define HCLGEVF_RING_RX_FBD_NUM_REG	0x80020
#define HCLGEVF_RING_RX_OFFSET_REG	0x80024
#define HCLGEVF_RING_RX_FBD_OFFSET_REG	0x80028
#define HCLGEVF_RING_RX_STASH_REG	0x80030
#define HCLGEVF_RING_RX_BD_ERR_REG	0x80034
#define HCLGEVF_RING_TX_ADDR_L_REG	0x80040
#define HCLGEVF_RING_TX_ADDR_H_REG	0x80044
#define HCLGEVF_RING_TX_BD_NUM_REG	0x80048
#define HCLGEVF_RING_TX_PRIORITY_REG	0x8004C
#define HCLGEVF_RING_TX_TC_REG		0x80050
#define HCLGEVF_RING_TX_MERGE_EN_REG	0x80054
#define HCLGEVF_RING_TX_TAIL_REG	0x80058
#define HCLGEVF_RING_TX_HEAD_REG	0x8005C
#define HCLGEVF_RING_TX_FBD_NUM_REG	0x80060
#define HCLGEVF_RING_TX_OFFSET_REG	0x80064
#define HCLGEVF_RING_TX_EBD_NUM_REG	0x80068
#define HCLGEVF_RING_TX_EBD_OFFSET_REG	0x80070
#define HCLGEVF_RING_TX_BD_ERR_REG	0x80074
#define HCLGEVF_RING_EN_REG		0x80090

/* bar registers for tqp interrupt */
#define HCLGEVF_TQP_INTR_CTRL_REG	0x20000
#define HCLGEVF_TQP_INTR_GL0_REG	0x20100
#define HCLGEVF_TQP_INTR_GL1_REG	0x20200
#define HCLGEVF_TQP_INTR_GL2_REG	0x20300
#define HCLGEVF_TQP_INTR_RL_REG		0x20900
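
/* Illustrative sketch (hypothetical helper, not part of the original driver):
 * the per-vector interrupt control registers appear to be laid out linearly
 * from HCLGEVF_VECTOR_REG_BASE with a stride of HCLGEVF_VECTOR_REG_OFFSET,
 * while the misc vector uses HCLGEVF_MISC_VECTOR_REG_BASE. The helper below
 * only shows how such an address could be derived from the defines above,
 * given a zero-based data-path vector index.
 */
static inline void __iomem *
hclgevf_example_vector_ctrl_addr(void __iomem *io_base, unsigned int idx)
{
	/* data-path vectors: base address plus a fixed per-vector stride */
	return io_base + HCLGEVF_VECTOR_REG_BASE +
	       idx * HCLGEVF_VECTOR_REG_OFFSET;
}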

/* Vector0 interrupt CMDQ event source register (RW) */
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG	0x27100
/* Vector0 interrupt CMDQ event status register (RO) */
#define HCLGEVF_VECTOR0_CMDQ_STATE_REG	0x27104
/* CMDQ register bits for RX event (= MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B	1
/* RST register bits for RESET event */
#define HCLGEVF_VECTOR0_RST_INT_B	2

#define HCLGEVF_TQP_RESET_TRY_TIMES	10
/* Reset related Registers */
#define HCLGEVF_RST_ING			0x20C00
#define HCLGEVF_FUN_RST_ING_BIT		BIT(0)
#define HCLGEVF_GLOBAL_RST_ING_BIT	BIT(5)
#define HCLGEVF_CORE_RST_ING_BIT	BIT(6)
#define HCLGEVF_IMP_RST_ING_BIT		BIT(7)
#define HCLGEVF_RST_ING_BITS \
	(HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
	 HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)

#define HCLGEVF_VF_RST_ING		0x07008
#define HCLGEVF_VF_RST_ING_BIT		BIT(16)

#define HCLGEVF_RSS_IND_TBL_SIZE	512
#define HCLGEVF_RSS_SET_BITMAP_MSK	0xffff
#define HCLGEVF_RSS_KEY_SIZE		40
#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ	0
#define HCLGEVF_RSS_HASH_ALGO_SIMPLE	1
#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC	2
#define HCLGEVF_RSS_HASH_ALGO_MASK	0xf

#define HCLGEVF_RSS_INPUT_TUPLE_OTHER	GENMASK(3, 0)
#define HCLGEVF_RSS_INPUT_TUPLE_SCTP	GENMASK(4, 0)
#define HCLGEVF_D_PORT_BIT		BIT(0)
#define HCLGEVF_S_PORT_BIT		BIT(1)
#define HCLGEVF_D_IP_BIT		BIT(2)
#define HCLGEVF_S_IP_BIT		BIT(3)
#define HCLGEVF_V_TAG_BIT		BIT(4)
#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
	(HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)

#define HCLGEVF_MAC_MAX_FRAME		9728

#define HCLGEVF_STATS_TIMER_INTERVAL	36U

enum hclgevf_evt_cause {
	HCLGEVF_VECTOR0_EVENT_RST,
	HCLGEVF_VECTOR0_EVENT_MBX,
	HCLGEVF_VECTOR0_EVENT_OTHER,
};

/* states of hclgevf device & tasks */
enum hclgevf_states {
	/* device states */
	HCLGEVF_STATE_DOWN,
	HCLGEVF_STATE_DISABLED,
	HCLGEVF_STATE_IRQ_INITED,
	HCLGEVF_STATE_REMOVING,
	HCLGEVF_STATE_NIC_REGISTERED,
	HCLGEVF_STATE_ROCE_REGISTERED,
	/* task states */
	HCLGEVF_STATE_RST_SERVICE_SCHED,
	HCLGEVF_STATE_RST_HANDLING,
	HCLGEVF_STATE_MBX_SERVICE_SCHED,
	HCLGEVF_STATE_MBX_HANDLING,
	HCLGEVF_STATE_CMD_DISABLE,
	HCLGEVF_STATE_LINK_UPDATING,
	HCLGEVF_STATE_PROMISC_CHANGED,
	HCLGEVF_STATE_RST_FAIL,
	HCLGEVF_STATE_PF_PUSH_LINK_STATUS,
};

struct hclgevf_mac {
	u8 media_type;
	u8 module_type;
	u8 mac_addr[ETH_ALEN];
	int link;
	u8 duplex;
	u32 speed;
	u64 supported;
	u64 advertising;
};

struct hclgevf_hw {
	void __iomem *io_base;
	void __iomem *mem_base;
	int num_vec;
	struct hclgevf_cmq cmq;
	struct hclgevf_mac mac;
	void *hdev; /* hclgevf device it is part of */
};

/* TQP stats */
struct hlcgevf_tqp_stats {
	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};

struct hclgevf_tqp {
	struct device *dev;	/* device for DMA mapping */
	struct hnae3_queue q;
	struct hlcgevf_tqp_stats tqp_stats;
	u16 index;		/* global index in a NIC controller */

	bool alloced;
};

struct hclgevf_cfg {
	u8 tc_num;
	u16 tqp_desc_num;
	u16 rx_buf_len;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u32 numa_node_map;
};

struct hclgevf_rss_tuple_cfg {
	u8 ipv4_tcp_en;
	u8 ipv4_udp_en;
	u8 ipv4_sctp_en;
	u8 ipv4_fragment_en;
	u8 ipv6_tcp_en;
	u8 ipv6_udp_en;
	u8 ipv6_sctp_en;
	u8 ipv6_fragment_en;
};
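
/* Illustrative sketch (hypothetical helper, not part of the original driver):
 * each tuple field above is a bitmap built from the HCLGEVF_*_BIT defines.
 * HCLGEVF_RSS_INPUT_TUPLE_OTHER enables hashing on src/dst IP and src/dst
 * port, and HCLGEVF_RSS_INPUT_TUPLE_SCTP additionally enables the SCTP
 * verification tag. The defaults below are only an illustrative choice;
 * the real driver picks its own defaults, which may differ per HW revision.
 */
static inline void
hclgevf_example_rss_tuple_defaults(struct hclgevf_rss_tuple_cfg *tuple_sets)
{
	tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
	tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
	tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
	/* fragments carry no L4 header, so hash on addresses only here */
	tuple_sets->ipv4_fragment_en = HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT;
	tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
	tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
	tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT;
	tuple_sets->ipv6_fragment_en = HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT;
}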

struct hclgevf_rss_cfg {
	u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
	u32 hash_algo;
	u32 rss_size;
	u8 hw_tc_map;
	/* shadow table */
	u8 *rss_indirection_tbl;
	struct hclgevf_rss_tuple_cfg rss_tuple_sets;
};

struct hclgevf_misc_vector {
	u8 __iomem *addr;
	int vector_irq;
	char name[HNAE3_INT_NAME_LEN];
};

struct hclgevf_rst_stats {
	u32 rst_cnt;		/* the number of resets */
	u32 vf_func_rst_cnt;	/* the number of VF function resets */
	u32 flr_rst_cnt;	/* the number of FLRs */
	u32 vf_rst_cnt;		/* the number of VF resets */
	u32 rst_done_cnt;	/* the number of resets completed */
	u32 hw_rst_done_cnt;	/* the number of HW resets completed */
	u32 rst_fail_cnt;	/* the number of VF reset failures */
};

enum HCLGEVF_MAC_ADDR_TYPE {
	HCLGEVF_MAC_ADDR_UC,
	HCLGEVF_MAC_ADDR_MC
};

enum HCLGEVF_MAC_NODE_STATE {
	HCLGEVF_MAC_TO_ADD,
	HCLGEVF_MAC_TO_DEL,
	HCLGEVF_MAC_ACTIVE
};

struct hclgevf_mac_addr_node {
	struct list_head node;
	enum HCLGEVF_MAC_NODE_STATE state;
	u8 mac_addr[ETH_ALEN];
};

struct hclgevf_mac_table_cfg {
	spinlock_t mac_list_lock; /* protect the MAC addresses pending add/delete */
	struct list_head uc_mac_list;
	struct list_head mc_mac_list;
};

struct hclgevf_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclgevf_hw hw;
	struct hclgevf_misc_vector misc_vector;
	struct hclgevf_rss_cfg rss_cfg;
	unsigned long state;
	unsigned long flr_state;
	unsigned long default_reset_request;
	unsigned long last_reset_time;
	enum hnae3_reset_type reset_level;
	unsigned long reset_pending;
	enum hnae3_reset_type reset_type;

#define HCLGEVF_RESET_REQUESTED		0
#define HCLGEVF_RESET_PENDING		1
	unsigned long reset_state;	/* requested, pending */
	struct hclgevf_rst_stats rst_stats;
	u32 reset_attempts;
	struct semaphore reset_sem;	/* protect reset process */

	u32 fw_version;
	u16 mbx_api_version;
	u16 num_tqps;		/* num task queue pairs of this VF */

	u16 alloc_rss_size;	/* allocated RSS task queue */
	u16 rss_size_max;	/* HW defined max RSS task queue */

	u16 num_alloc_vport;	/* num vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_tx_desc;	/* desc num per tx queue */
	u16 num_rx_desc;	/* desc num per rx queue */
	u8 hw_tc_map;
	u8 has_pf_mac;

	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u16 num_nic_msix;	/* Num of nic vectors for this VF */
	u16 num_roce_msix;	/* Num of roce vectors for this VF */
	u16 roce_base_msix_offset;
	int roce_base_vector;
	u32 base_msi_vector;
	u16 *vector_status;
	int *vector_irq;

	unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];

	struct hclgevf_mac_table_cfg mac_table;

	bool mbx_event_pending;
	struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
	struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */

	struct delayed_work service_task;

	struct hclgevf_tqp *htqp;

	struct hnae3_handle nic;
	struct hnae3_handle roce;

	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;
	u32 flag;
	unsigned long serv_processed_cnt;
	unsigned long last_serv_processed;

	struct devlink *devlink;
};

static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
{
	return !!hdev->reset_pending;
}
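
/* Illustrative sketch (hypothetical helper, not part of the original driver):
 * hardware-driven resets are reported through the HCLGEVF_RST_ING register,
 * one HCLGEVF_*_RST_ING_BIT per reset source. Assuming readl() is available
 * (e.g. via <linux/io.h>), a "HW reset in progress" check could look like
 * the function below.
 */
static inline bool hclgevf_example_hw_reset_ongoing(struct hclgevf_dev *hdev)
{
	u32 val = readl(hdev->hw.io_base + HCLGEVF_RST_ING);

	return !!(val & HCLGEVF_RST_ING_BITS);
}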

int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
			 struct hclge_vf_to_pf_msg *send_msg, bool need_resp,
			 u8 *resp_data, u16 resp_len);
void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev);

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex);
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
					u8 *port_base_vlan_info, u8 data_size);
#endif