/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2016-2017 Hisilicon Limited. */

#ifndef __HCLGEVF_MAIN_H
#define __HCLGEVF_MAIN_H
#include <linux/fs.h>
#include <linux/types.h>
#include "hclge_mbx.h"
#include "hclgevf_cmd.h"
#include "hnae3.h"

#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"

#define HCLGEVF_MISC_VECTOR_NUM		0

#define HCLGEVF_INVALID_VPORT		0xffff

/* The actual number depends upon the total number of VFs created by
 * the physical function, but the maximum number of possible vectors
 * per VF is {VFn(1-32), VECTn(32 + 1)}.
 */
#define HCLGEVF_MAX_VF_VECTOR_NUM	(32 + 1)

#define HCLGEVF_VECTOR_REG_BASE		0x20000
#define HCLGEVF_MISC_VECTOR_REG_BASE	0x20400
#define HCLGEVF_VECTOR_REG_OFFSET	0x4
#define HCLGEVF_VECTOR_VF_OFFSET	0x100000

/* Vector0 interrupt CMDQ event source register (RW) */
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG	0x27100
/* CMDQ register bits for RX event (=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B	1

#define HCLGEVF_TQP_RESET_TRY_TIMES	10
/* Reset related Registers */
#define HCLGEVF_FUN_RST_ING		0x20C00
#define HCLGEVF_FUN_RST_ING_B		0

#define HCLGEVF_RSS_IND_TBL_SIZE	512
#define HCLGEVF_RSS_SET_BITMAP_MSK	0xffff
#define HCLGEVF_RSS_KEY_SIZE		40
#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ	0
#define HCLGEVF_RSS_HASH_ALGO_SIMPLE	1
#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC	2
#define HCLGEVF_RSS_HASH_ALGO_MASK	0xf
#define HCLGEVF_RSS_CFG_TBL_NUM \
	(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
#define HCLGEVF_RSS_INPUT_TUPLE_OTHER	GENMASK(3, 0)
#define HCLGEVF_RSS_INPUT_TUPLE_SCTP	GENMASK(4, 0)
#define HCLGEVF_D_PORT_BIT		BIT(0)
#define HCLGEVF_S_PORT_BIT		BIT(1)
#define HCLGEVF_D_IP_BIT		BIT(2)
#define HCLGEVF_S_IP_BIT		BIT(3)
#define HCLGEVF_V_TAG_BIT		BIT(4)

/* states of hclgevf device & tasks */
enum hclgevf_states {
	/* device states */
	HCLGEVF_STATE_DOWN,
	HCLGEVF_STATE_DISABLED,
	/* task states */
	HCLGEVF_STATE_SERVICE_SCHED,
	HCLGEVF_STATE_RST_SERVICE_SCHED,
	HCLGEVF_STATE_RST_HANDLING,
	HCLGEVF_STATE_MBX_SERVICE_SCHED,
	HCLGEVF_STATE_MBX_HANDLING,
};

#define HCLGEVF_MPF_ENBALE	1

struct hclgevf_mac {
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	int link;
	u8 duplex;
	u32 speed;
};

struct hclgevf_hw {
	void __iomem *io_base;
	int num_vec;
	struct hclgevf_cmq cmq;
	struct hclgevf_mac mac;
	void *hdev; /* hclgevf device it is part of */
};

/* TQP stats */
struct hlcgevf_tqp_stats {
	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};

struct hclgevf_tqp {
	struct device *dev;	/* device for DMA mapping */
	struct hnae3_queue q;
	struct hlcgevf_tqp_stats tqp_stats;
	u16 index;		/* global index in a NIC controller */

	bool alloced;
};

struct hclgevf_cfg {
	u8 vmdq_vport_num;
	u8 tc_num;
	u16 tqp_desc_num;
	u16 rx_buf_len;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u32 numa_node_map;
};

struct hclgevf_rss_tuple_cfg {
	u8 ipv4_tcp_en;
	u8 ipv4_udp_en;
	u8 ipv4_sctp_en;
	u8 ipv4_fragment_en;
	u8 ipv6_tcp_en;
	u8 ipv6_udp_en;
	u8 ipv6_sctp_en;
	u8 ipv6_fragment_en;
};

struct hclgevf_rss_cfg {
	u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
	u32 hash_algo;
	u32 rss_size;
	u8 hw_tc_map;
	u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
	struct hclgevf_rss_tuple_cfg rss_tuple_sets;
};

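/* Illustrative sketch only (not part of this header's interface): the
 * driver's RSS init code would typically seed the tuple sets above from
 * the input-tuple masks defined earlier, e.g.
 *
 *	rss_cfg->rss_tuple_sets.ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
 *	rss_cfg->rss_tuple_sets.ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
 *
 * where the SCTP mask additionally covers the verification-tag bit
 * (HCLGEVF_V_TAG_BIT). The exact defaults are set in the driver's
 * RSS initialization, not here.
 */
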
struct hclgevf_misc_vector {
	u8 __iomem *addr;
	int vector_irq;
};

struct hclgevf_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclgevf_hw hw;
	struct hclgevf_misc_vector misc_vector;
	struct hclgevf_rss_cfg rss_cfg;
	unsigned long state;

#define HCLGEVF_RESET_REQUESTED		0
#define HCLGEVF_RESET_PENDING		1
	unsigned long reset_state;	/* requested, pending */
	u32 reset_attempts;

	u32 fw_version;
	u16 num_tqps;		/* num task queue pairs of this VF */

	u16 alloc_rss_size;	/* allocated RSS task queue */
	u16 rss_size_max;	/* HW defined max RSS task queue */

	u16 num_alloc_vport;	/* num vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_desc;
	u8 hw_tc_map;

	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u16 num_roce_msix;	/* Num of roce vectors for this VF */
	u16 roce_base_msix_offset;
	int roce_base_vector;
	u32 base_msi_vector;
	u16 *vector_status;
	int *vector_irq;

	bool mbx_event_pending;
	struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
	struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */

	struct timer_list service_timer;
	struct work_struct service_task;
	struct work_struct rst_service_task;
	struct work_struct mbx_service_task;

	struct hclgevf_tqp *htqp;

	struct hnae3_handle nic;
	struct hnae3_handle roce;

	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;
	u32 flag;
};

static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev)
{
	return (hdev &&
		(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
		(hdev->nic.reset_level == HNAE3_VF_RESET));
}

static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev)
{
	return (hdev &&
		(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
		(hdev->nic.reset_level == HNAE3_VF_FULL_RESET));
}

int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
			 const u8 *msg_data, u8 msg_len, bool need_resp,
			 u8 *resp_data, u16 resp_len);
void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev);

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex);
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
#endif
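
/* Usage sketch (illustrative only, kept outside the include guard): a caller
 * in the VF driver sends a mailbox request to the PF through
 * hclgevf_send_mbx_msg() declared above. HCLGE_MBX_SET_UNICAST comes from
 * hclge_mbx.h; the subcode and payload layout here are assumptions made for
 * the example, not the driver's definitive message format:
 *
 *	u8 msg_data[ETH_ALEN];
 *	int ret;
 *
 *	ether_addr_copy(msg_data, new_mac);
 *	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, 0,
 *				   msg_data, ETH_ALEN, true, NULL, 0);
 *
 * With need_resp set to true the call blocks until the PF acknowledges the
 * message via the mailbox response tracked in hdev->mbx_resp.
 */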