/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2016-2017 Hisilicon Limited. */

#ifndef __HCLGEVF_CMD_H
#define __HCLGEVF_CMD_H
#include <linux/io.h>
#include <linux/types.h>
#include "hnae3.h"

#define HCLGEVF_CMDQ_TX_TIMEOUT		30000
#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME	200
#define HCLGEVF_CMDQ_RX_INVLD_B		0
#define HCLGEVF_CMDQ_RX_OUTVLD_B	1

struct hclgevf_hw;
struct hclgevf_dev;

struct hclgevf_desc {
	__le16 opcode;
	__le16 flag;
	__le16 retval;
	__le16 rsv;
	__le32 data[6];
};

struct hclgevf_desc_cb {
	dma_addr_t dma;
	void *va;
	u32 length;
};

struct hclgevf_cmq_ring {
	dma_addr_t desc_dma_addr;
	struct hclgevf_desc *desc;
	struct hclgevf_desc_cb *desc_cb;
	struct hclgevf_dev *dev;
	u32 head;
	u32 tail;

	u16 buf_size;
	u16 desc_num;
	int next_to_use;
	int next_to_clean;
	u8 flag;
	spinlock_t lock; /* Command queue lock */
};

enum hclgevf_cmd_return_status {
	HCLGEVF_CMD_EXEC_SUCCESS	= 0,
	HCLGEVF_CMD_NO_AUTH		= 1,
	HCLGEVF_CMD_NOT_SUPPORTED	= 2,
	HCLGEVF_CMD_QUEUE_FULL		= 3,
	HCLGEVF_CMD_NEXT_ERR		= 4,
	HCLGEVF_CMD_UNEXE_ERR		= 5,
	HCLGEVF_CMD_PARA_ERR		= 6,
	HCLGEVF_CMD_RESULT_ERR		= 7,
	HCLGEVF_CMD_TIMEOUT		= 8,
	HCLGEVF_CMD_HILINK_ERR		= 9,
	HCLGEVF_CMD_QUEUE_ILLEGAL	= 10,
	HCLGEVF_CMD_INVALID		= 11,
};

enum hclgevf_cmd_status {
	HCLGEVF_STATUS_SUCCESS	= 0,
	HCLGEVF_ERR_CSQ_FULL	= -1,
	HCLGEVF_ERR_CSQ_TIMEOUT	= -2,
	HCLGEVF_ERR_CSQ_ERROR	= -3
};

struct hclgevf_cmq {
	struct hclgevf_cmq_ring csq;
	struct hclgevf_cmq_ring crq;
	u16 tx_timeout; /* Tx timeout */
	enum hclgevf_cmd_status last_status;
};

#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT		0
#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT	1
#define HCLGEVF_CMD_FLAG_NEXT_SHIFT		2
#define HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT		3
#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT		4
#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT		5

#define HCLGEVF_CMD_FLAG_IN		BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT)
#define HCLGEVF_CMD_FLAG_OUT		BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT)
#define HCLGEVF_CMD_FLAG_NEXT		BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT)
#define HCLGEVF_CMD_FLAG_WR		BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT)
#define HCLGEVF_CMD_FLAG_NO_INTR	BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT)
#define HCLGEVF_CMD_FLAG_ERR_INTR	BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT)

enum hclgevf_opcode_type {
	/* Generic command */
	HCLGEVF_OPC_QUERY_FW_VER	= 0x0001,
	HCLGEVF_OPC_QUERY_VF_RSRC	= 0x0024,
	HCLGEVF_OPC_QUERY_DEV_SPECS	= 0x0050,

	/* TQP command */
	HCLGEVF_OPC_QUERY_TX_STATUS	= 0x0B03,
	HCLGEVF_OPC_QUERY_RX_STATUS	= 0x0B13,
	HCLGEVF_OPC_CFG_COM_TQP_QUEUE	= 0x0B20,
	/* GRO command */
	HCLGEVF_OPC_GRO_GENERIC_CONFIG	= 0x0C10,
	/* RSS cmd */
	HCLGEVF_OPC_RSS_GENERIC_CONFIG	= 0x0D01,
	HCLGEVF_OPC_RSS_INPUT_TUPLE	= 0x0D02,
	HCLGEVF_OPC_RSS_INDIR_TABLE	= 0x0D07,
	HCLGEVF_OPC_RSS_TC_MODE		= 0x0D08,
	/* Mailbox cmd */
	HCLGEVF_OPC_MBX_VF_TO_PF	= 0x2001,
};

#define HCLGEVF_TQP_REG_OFFSET		0x80000
#define HCLGEVF_TQP_REG_SIZE		0x200

#define HCLGEVF_TQP_MAX_SIZE_DEV_V2	1024
#define HCLGEVF_TQP_EXT_REG_OFFSET	0x100

struct hclgevf_tqp_map {
	__le16 tqp_id;	/* Absolute tqp id in this pf */
	u8 tqp_vf;	/* VF id */
#define HCLGEVF_TQP_MAP_TYPE_PF		0
#define HCLGEVF_TQP_MAP_TYPE_VF		1
#define HCLGEVF_TQP_MAP_TYPE_B		0
#define HCLGEVF_TQP_MAP_EN_B		1
	u8 tqp_flag;	/* Indicate it's pf or vf tqp */
	__le16 tqp_vid; /* Virtual id in this pf/vf */
	u8 rsv[18];
};
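
/*
 * Illustrative sketch only, not part of the original header: it documents how
 * the descriptor flag bits above are typically combined for a read command.
 * The helper name is hypothetical; real callers use
 * hclgevf_cmd_setup_basic_desc(), declared later in this header and
 * implemented in hclgevf_cmd.c.
 */
static inline void hclgevf_example_init_read_desc(struct hclgevf_desc *desc,
						  enum hclgevf_opcode_type opcode)
{
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	/* descriptor data is valid on input, completion raises no interrupt,
	 * and the WR_OR_RD bit is set to request a read from firmware
	 */
	desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR | HCLGEVF_CMD_FLAG_IN |
				 HCLGEVF_CMD_FLAG_WR);
}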

#define HCLGEVF_VECTOR_ELEMENTS_PER_CMD	10

enum hclgevf_int_type {
	HCLGEVF_INT_TX = 0,
	HCLGEVF_INT_RX,
	HCLGEVF_INT_EVENT,
};

struct hclgevf_ctrl_vector_chain {
	u8 int_vector_id;
	u8 int_cause_num;
#define HCLGEVF_INT_TYPE_S	0
#define HCLGEVF_INT_TYPE_M	0x3
#define HCLGEVF_TQP_ID_S	2
#define HCLGEVF_TQP_ID_M	(0x3fff << HCLGEVF_TQP_ID_S)
	__le16 tqp_type_and_id[HCLGEVF_VECTOR_ELEMENTS_PER_CMD];
	u8 vfid;
	u8 resv;
};

enum HCLGEVF_CAP_BITS {
	HCLGEVF_CAP_UDP_GSO_B,
	HCLGEVF_CAP_QB_B,
	HCLGEVF_CAP_FD_FORWARD_TC_B,
	HCLGEVF_CAP_PTP_B,
	HCLGEVF_CAP_INT_QL_B,
	HCLGEVF_CAP_HW_TX_CSUM_B,
	HCLGEVF_CAP_TX_PUSH_B,
	HCLGEVF_CAP_PHY_IMP_B,
	HCLGEVF_CAP_TQP_TXRX_INDEP_B,
	HCLGEVF_CAP_HW_PAD_B,
	HCLGEVF_CAP_STASH_B,
	HCLGEVF_CAP_UDP_TUNNEL_CSUM_B,
	HCLGEVF_CAP_RXD_ADV_LAYOUT_B = 15,
};

enum HCLGEVF_API_CAP_BITS {
	HCLGEVF_API_CAP_FLEX_RSS_TBL_B,
};

#define HCLGEVF_QUERY_CAP_LENGTH	3
struct hclgevf_query_version_cmd {
	__le32 firmware;
	__le32 hardware;
	__le32 api_caps;
	__le32 caps[HCLGEVF_QUERY_CAP_LENGTH]; /* capabilities of device */
};

#define HCLGEVF_MSIX_OFT_ROCEE_S	0
#define HCLGEVF_MSIX_OFT_ROCEE_M	(0xffff << HCLGEVF_MSIX_OFT_ROCEE_S)
#define HCLGEVF_VEC_NUM_S		0
#define HCLGEVF_VEC_NUM_M		(0xff << HCLGEVF_VEC_NUM_S)
struct hclgevf_query_res_cmd {
	__le16 tqp_num;
	__le16 reserved;
	__le16 msixcap_localid_ba_nic;
	__le16 msixcap_localid_ba_rocee;
	__le16 vf_intr_vector_number;
	__le16 rsv[7];
};

#define HCLGEVF_GRO_EN_B	0
struct hclgevf_cfg_gro_status_cmd {
	u8 gro_en;
	u8 rsv[23];
};

#define HCLGEVF_RSS_DEFAULT_OUTPORT_B	4
#define HCLGEVF_RSS_HASH_KEY_OFFSET_B	4
#define HCLGEVF_RSS_HASH_KEY_NUM	16
struct hclgevf_rss_config_cmd {
	u8 hash_config;
	u8 rsv[7];
	u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM];
};

struct hclgevf_rss_input_tuple_cmd {
	u8 ipv4_tcp_en;
	u8 ipv4_udp_en;
	u8 ipv4_sctp_en;
	u8 ipv4_fragment_en;
	u8 ipv6_tcp_en;
	u8 ipv6_udp_en;
	u8 ipv6_sctp_en;
	u8 ipv6_fragment_en;
	u8 rsv[16];
};

#define HCLGEVF_RSS_CFG_TBL_SIZE	16

struct hclgevf_rss_indirection_table_cmd {
	__le16 start_table_index;
	__le16 rss_set_bitmap;
	u8 rsv[4];
	u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
};

#define HCLGEVF_RSS_TC_OFFSET_S		0
#define HCLGEVF_RSS_TC_OFFSET_M		GENMASK(10, 0)
#define HCLGEVF_RSS_TC_SIZE_MSB_B	11
#define HCLGEVF_RSS_TC_SIZE_S		12
#define HCLGEVF_RSS_TC_SIZE_M		GENMASK(14, 12)
#define HCLGEVF_RSS_TC_VALID_B		15
#define HCLGEVF_MAX_TC_NUM		8
#define HCLGEVF_RSS_TC_SIZE_MSB_OFFSET	3

struct hclgevf_rss_tc_mode_cmd {
	__le16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
	u8 rsv[8];
};
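
/*
 * Illustrative sketch only, not part of the original header: one way a single
 * rss_tc_mode entry could be packed from the bit-field definitions above,
 * assuming the hnae3_set_bit()/hnae3_set_field() helpers provided by hnae3.h.
 * The helper name is hypothetical and is shown purely to document the field
 * layout (valid bit, 3-bit size with an extra MSB bit, 11-bit offset).
 */
static inline __le16 hclgevf_example_pack_rss_tc_mode(bool tc_valid,
						      u16 tc_size, u16 tc_offset)
{
	u16 mode = 0;

	hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B, tc_valid ? 1 : 0);
	hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B,
		      (tc_size >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET) & 0x1);
	hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M, HCLGEVF_RSS_TC_SIZE_S,
			tc_size);
	hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M, HCLGEVF_RSS_TC_OFFSET_S,
			tc_offset);

	return cpu_to_le16(mode);
}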

#define HCLGEVF_LINK_STS_B	0
#define HCLGEVF_LINK_STATUS	BIT(HCLGEVF_LINK_STS_B)
struct hclgevf_link_status_cmd {
	u8 status;
	u8 rsv[23];
};

#define HCLGEVF_RING_ID_MASK	0x3ff
#define HCLGEVF_TQP_ENABLE_B	0

struct hclgevf_cfg_com_tqp_queue_cmd {
	__le16 tqp_id;
	__le16 stream_id;
	u8 enable;
	u8 rsv[19];
};

struct hclgevf_cfg_tx_queue_pointer_cmd {
	__le16 tqp_id;
	__le16 tx_tail;
	__le16 tx_head;
	__le16 fbd_num;
	__le16 ring_offset;
	u8 rsv[14];
};

#define HCLGEVF_TYPE_CRQ	0
#define HCLGEVF_TYPE_CSQ	1

/* this bit indicates that the driver is ready for hardware reset */
#define HCLGEVF_NIC_SW_RST_RDY_B	16
#define HCLGEVF_NIC_SW_RST_RDY		BIT(HCLGEVF_NIC_SW_RST_RDY_B)

#define HCLGEVF_NIC_CMQ_DESC_NUM	1024
#define HCLGEVF_NIC_CMQ_DESC_NUM_S	3

#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM	4

struct hclgevf_dev_specs_0_cmd {
	__le32 rsv0;
	__le32 mac_entry_num;
	__le32 mng_entry_num;
	__le16 rss_ind_tbl_size;
	__le16 rss_key_size;
	__le16 int_ql_max;
	u8 max_non_tso_bd_num;
	u8 rsv1[5];
};

#define HCLGEVF_DEF_MAX_INT_GL	0x1FE0U

struct hclgevf_dev_specs_1_cmd {
	__le16 max_frm_size;
	__le16 rsv0;
	__le16 max_int_gl;
	u8 rsv1[18];
};

/* capabilities bits map between imp firmware and local driver */
struct hclgevf_caps_bit_map {
	u16 imp_bit;
	u16 local_bit;
};

static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
{
	writel(value, base + reg);
}

static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
{
	u8 __iomem *reg_addr = READ_ONCE(base);

	return readl(reg_addr + reg);
}

#define hclgevf_write_dev(a, reg, value) \
	hclgevf_write_reg((a)->io_base, reg, value)
#define hclgevf_read_dev(a, reg) \
	hclgevf_read_reg((a)->io_base, reg)

#define HCLGEVF_SEND_SYNC(flag) \
	((flag) & HCLGEVF_CMD_FLAG_NO_INTR)

int hclgevf_cmd_init(struct hclgevf_dev *hdev);
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev);

int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
				  enum hclgevf_opcode_type opcode,
				  bool is_read);
#endif
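
/*
 * Usage sketch (illustrative only, not part of the original header): callers
 * typically pair hclgevf_cmd_setup_basic_desc() with hclgevf_cmd_send(), for
 * example to read the firmware version:
 *
 *	struct hclgevf_query_version_cmd *resp;
 *	struct hclgevf_desc desc;
 *	int ret;
 *
 *	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, true);
 *	ret = hclgevf_cmd_send(hw, &desc, 1);
 *	if (!ret) {
 *		resp = (struct hclgevf_query_version_cmd *)desc.data;
 *		fw_version = le32_to_cpu(resp->firmware);
 *	}
 *
 * Here "hw" and "fw_version" belong to the caller. Overlaying
 * struct hclgevf_query_version_cmd on desc.data is an assumption based on the
 * 24-byte data area above; confirm against hclgevf_cmd.c before relying on it.
 */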