/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2016-2017 Hisilicon Limited. */

#ifndef __HCLGEVF_CMD_H
#define __HCLGEVF_CMD_H
#include <linux/io.h>
#include <linux/types.h>
#include "hnae3.h"

#define HCLGEVF_CMDQ_TX_TIMEOUT 30000
#define HCLGEVF_CMDQ_RX_INVLD_B 0
#define HCLGEVF_CMDQ_RX_OUTVLD_B 1

struct hclgevf_hw;
struct hclgevf_dev;

struct hclgevf_desc {
	__le16 opcode;
	__le16 flag;
	__le16 retval;
	__le16 rsv;
	__le32 data[6];
};

struct hclgevf_desc_cb {
	dma_addr_t dma;
	void *va;
	u32 length;
};

struct hclgevf_cmq_ring {
	dma_addr_t desc_dma_addr;
	struct hclgevf_desc *desc;
	struct hclgevf_desc_cb *desc_cb;
	struct hclgevf_dev *dev;
	u32 head;
	u32 tail;

	u16 buf_size;
	u16 desc_num;
	int next_to_use;
	int next_to_clean;
	u8 flag;
	spinlock_t lock; /* Command queue lock */
};

enum hclgevf_cmd_return_status {
	HCLGEVF_CMD_EXEC_SUCCESS = 0,
	HCLGEVF_CMD_NO_AUTH = 1,
	HCLGEVF_CMD_NOT_EXEC = 2,
	HCLGEVF_CMD_QUEUE_FULL = 3,
};

enum hclgevf_cmd_status {
	HCLGEVF_STATUS_SUCCESS = 0,
	HCLGEVF_ERR_CSQ_FULL = -1,
	HCLGEVF_ERR_CSQ_TIMEOUT = -2,
	HCLGEVF_ERR_CSQ_ERROR = -3
};

struct hclgevf_cmq {
	struct hclgevf_cmq_ring csq;
	struct hclgevf_cmq_ring crq;
	u16 tx_timeout; /* Tx timeout */
	enum hclgevf_cmd_status last_status;
};

#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT 0
#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT 1
#define HCLGEVF_CMD_FLAG_NEXT_SHIFT 2
#define HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT 3
#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT 4
#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT 5

#define HCLGEVF_CMD_FLAG_IN BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT)
#define HCLGEVF_CMD_FLAG_OUT BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT)
#define HCLGEVF_CMD_FLAG_NEXT BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT)
#define HCLGEVF_CMD_FLAG_WR BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT)
#define HCLGEVF_CMD_FLAG_NO_INTR BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT)
#define HCLGEVF_CMD_FLAG_ERR_INTR BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT)

enum hclgevf_opcode_type {
	/* Generic command */
	HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
	/* TQP command */
	HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
	HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
	HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
	/* RSS cmd */
	HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
	HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
	HCLGEVF_OPC_RSS_TC_MODE = 0x0D08,
	/* Mailbox cmd */
	HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
};

#define HCLGEVF_TQP_REG_OFFSET 0x80000
#define HCLGEVF_TQP_REG_SIZE 0x200

struct hclgevf_tqp_map {
	__le16 tqp_id;	/* Absolute tqp id in this pf */
	u8 tqp_vf;	/* VF id */
#define HCLGEVF_TQP_MAP_TYPE_PF 0
#define HCLGEVF_TQP_MAP_TYPE_VF 1
#define HCLGEVF_TQP_MAP_TYPE_B 0
#define HCLGEVF_TQP_MAP_EN_B 1
	u8 tqp_flag;	/* Indicates whether this is a pf or vf tqp */
	__le16 tqp_vid;	/* Virtual id in this pf/vf */
	u8 rsv[18];
};

#define HCLGEVF_VECTOR_ELEMENTS_PER_CMD 10

enum hclgevf_int_type {
	HCLGEVF_INT_TX = 0,
	HCLGEVF_INT_RX,
	HCLGEVF_INT_EVENT,
};

struct hclgevf_ctrl_vector_chain {
	u8 int_vector_id;
	u8 int_cause_num;
#define HCLGEVF_INT_TYPE_S 0
#define HCLGEVF_INT_TYPE_M 0x3
#define HCLGEVF_TQP_ID_S 2
#define HCLGEVF_TQP_ID_M (0x3fff << HCLGEVF_TQP_ID_S)
	__le16 tqp_type_and_id[HCLGEVF_VECTOR_ELEMENTS_PER_CMD];
	u8 vfid;
	u8 resv;
};

struct hclgevf_query_version_cmd {
	__le32 firmware;
	__le32 firmware_rsv[5];
};

#define HCLGEVF_RSS_HASH_KEY_OFFSET 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
struct hclgevf_rss_config_cmd {
	u8 hash_config;
	u8 rsv[7];
	u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM];
};

struct hclgevf_rss_input_tuple_cmd {
	u8 ipv4_tcp_en;
	u8 ipv4_udp_en;
	u8 ipv4_stcp_en;
	u8 ipv4_fragment_en;
	u8 ipv6_tcp_en;
	u8 ipv6_udp_en;
	u8 ipv6_stcp_en;
	u8 ipv6_fragment_en;
	u8 rsv[16];
};

#define HCLGEVF_RSS_CFG_TBL_SIZE 16

struct hclgevf_rss_indirection_table_cmd {
	u16 start_table_index;
	u16 rss_set_bitmap;
	u8 rsv[4];
	u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
};

#define HCLGEVF_RSS_TC_OFFSET_S 0
#define HCLGEVF_RSS_TC_OFFSET_M (0x3ff << HCLGEVF_RSS_TC_OFFSET_S)
#define HCLGEVF_RSS_TC_SIZE_S 12
#define HCLGEVF_RSS_TC_SIZE_M (0x7 << HCLGEVF_RSS_TC_SIZE_S)
#define HCLGEVF_RSS_TC_VALID_B 15
#define HCLGEVF_MAX_TC_NUM 8
struct hclgevf_rss_tc_mode_cmd {
	u16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
	u8 rsv[8];
};

#define HCLGEVF_LINK_STS_B 0
#define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B)
struct hclgevf_link_status_cmd {
	u8 status;
	u8 rsv[23];
};

#define HCLGEVF_RING_ID_MASK 0x3ff
#define HCLGEVF_TQP_ENABLE_B 0

struct hclgevf_cfg_com_tqp_queue_cmd {
	__le16 tqp_id;
	__le16 stream_id;
	u8 enable;
	u8 rsv[19];
};

struct hclgevf_cfg_tx_queue_pointer_cmd {
	__le16 tqp_id;
	__le16 tx_tail;
	__le16 tx_head;
	__le16 fbd_num;
	__le16 ring_offset;
	u8 rsv[14];
};

#define HCLGEVF_TYPE_CRQ 0
#define HCLGEVF_TYPE_CSQ 1
#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000
#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004
#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008
#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010
#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014
#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018
#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701c
#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
#define HCLGEVF_NIC_CMQ_EN_B 16
#define HCLGEVF_NIC_CMQ_ENABLE BIT(HCLGEVF_NIC_CMQ_EN_B)
#define HCLGEVF_NIC_CMQ_DESC_NUM 1024
#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100

static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
{
	writel(value, base + reg);
}

static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
{
	u8 __iomem *reg_addr = READ_ONCE(base);

	return readl(reg_addr + reg);
}

#define hclgevf_write_dev(a, reg, value) \
	hclgevf_write_reg((a)->io_base, (reg), (value))
#define hclgevf_read_dev(a, reg) \
	hclgevf_read_reg((a)->io_base, (reg))

#define HCLGEVF_SEND_SYNC(flag) \
	((flag) & HCLGEVF_CMD_FLAG_NO_INTR)

int hclgevf_cmd_init(struct hclgevf_dev *hdev);
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);

int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
				  enum hclgevf_opcode_type opcode,
				  bool is_read);
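
/*
 * Illustrative sketch, not part of the original header: one way a caller
 * could combine hclgevf_cmd_setup_basic_desc() and hclgevf_cmd_send()
 * (declared above) to issue the generic HCLGEVF_OPC_QUERY_FW_VER command
 * and read the firmware word back from struct hclgevf_query_version_cmd.
 * The helper name below is hypothetical and only shows the descriptor
 * flow; it also assumes le32_to_cpu() is visible at the point of use
 * (e.g. via <asm/byteorder.h>).
 */
static inline int hclgevf_query_fw_version_sketch(struct hclgevf_hw *hw,
						  u32 *version)
{
	struct hclgevf_query_version_cmd *resp;
	struct hclgevf_desc desc;
	int ret;

	/* Build a single read descriptor for the firmware-version opcode */
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, true);

	/* Post the descriptor on the CSQ and wait for its completion */
	ret = hclgevf_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	/* The response payload aliases the descriptor's data words */
	resp = (struct hclgevf_query_version_cmd *)desc.data;
	*version = le32_to_cpu(resp->firmware);

	return 0;
}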
#endif