/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2016-2017 Hisilicon Limited. */

#ifndef __HCLGEVF_MAIN_H
#define __HCLGEVF_MAIN_H
#include <linux/fs.h>
#include <linux/types.h>
#include "hclge_mbx.h"
#include "hclgevf_cmd.h"
#include "hnae3.h"

#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"

#define HCLGEVF_MISC_VECTOR_NUM		0

#define HCLGEVF_INVALID_VPORT		0xffff

/* The actual number depends upon the total number of VFs created
 * by the physical function, but the maximum number of possible
 * vectors per VF is {VFn(1-32), VECTn(32 + 1)}.
 */
#define HCLGEVF_MAX_VF_VECTOR_NUM	(32 + 1)

#define HCLGEVF_VECTOR_REG_BASE		0x20000
#define HCLGEVF_MISC_VECTOR_REG_BASE	0x20400
#define HCLGEVF_VECTOR_REG_OFFSET	0x4
#define HCLGEVF_VECTOR_VF_OFFSET		0x100000

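/*
 * Illustrative sketch (not part of the original header): this assumes the
 * per-vector control registers are laid out starting at
 * HCLGEVF_VECTOR_REG_BASE with a stride of HCLGEVF_VECTOR_REG_OFFSET per
 * vector. The helper name is hypothetical.
 */
static inline void __iomem *
hclgevf_example_vector_addr(void __iomem *io_base, int vector_id)
{
	return io_base + HCLGEVF_VECTOR_REG_BASE +
	       vector_id * HCLGEVF_VECTOR_REG_OFFSET;
}
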
/* bar registers for cmdq */
#define HCLGEVF_CMDQ_TX_ADDR_L_REG		0x27000
#define HCLGEVF_CMDQ_TX_ADDR_H_REG		0x27004
#define HCLGEVF_CMDQ_TX_DEPTH_REG		0x27008
#define HCLGEVF_CMDQ_TX_TAIL_REG		0x27010
#define HCLGEVF_CMDQ_TX_HEAD_REG		0x27014
#define HCLGEVF_CMDQ_RX_ADDR_L_REG		0x27018
#define HCLGEVF_CMDQ_RX_ADDR_H_REG		0x2701C
#define HCLGEVF_CMDQ_RX_DEPTH_REG		0x27020
#define HCLGEVF_CMDQ_RX_TAIL_REG		0x27024
#define HCLGEVF_CMDQ_RX_HEAD_REG		0x27028
#define HCLGEVF_CMDQ_INTR_SRC_REG		0x27100
#define HCLGEVF_CMDQ_INTR_STS_REG		0x27104
#define HCLGEVF_CMDQ_INTR_EN_REG		0x27108
#define HCLGEVF_CMDQ_INTR_GEN_REG		0x2710C

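/*
 * Illustrative sketch (not part of the original header, assumes
 * <linux/io.h> is available): firmware is told about newly queued command
 * descriptors by writing the updated tail index to the CMDQ TX tail
 * doorbell register defined above. The helper name is hypothetical.
 */
static inline void hclgevf_example_ring_cmdq_tx_doorbell(void __iomem *io_base,
							  u32 tail)
{
	writel(tail, io_base + HCLGEVF_CMDQ_TX_TAIL_REG);
}
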
/* bar registers for common func */
#define HCLGEVF_GRO_EN_REG			0x28000

/* bar registers for rcb */
#define HCLGEVF_RING_RX_ADDR_L_REG		0x80000
#define HCLGEVF_RING_RX_ADDR_H_REG		0x80004
#define HCLGEVF_RING_RX_BD_NUM_REG		0x80008
#define HCLGEVF_RING_RX_BD_LENGTH_REG		0x8000C
#define HCLGEVF_RING_RX_MERGE_EN_REG		0x80014
#define HCLGEVF_RING_RX_TAIL_REG		0x80018
#define HCLGEVF_RING_RX_HEAD_REG		0x8001C
#define HCLGEVF_RING_RX_FBD_NUM_REG		0x80020
#define HCLGEVF_RING_RX_OFFSET_REG		0x80024
#define HCLGEVF_RING_RX_FBD_OFFSET_REG		0x80028
#define HCLGEVF_RING_RX_STASH_REG		0x80030
#define HCLGEVF_RING_RX_BD_ERR_REG		0x80034
#define HCLGEVF_RING_TX_ADDR_L_REG		0x80040
#define HCLGEVF_RING_TX_ADDR_H_REG		0x80044
#define HCLGEVF_RING_TX_BD_NUM_REG		0x80048
#define HCLGEVF_RING_TX_PRIORITY_REG		0x8004C
#define HCLGEVF_RING_TX_TC_REG			0x80050
#define HCLGEVF_RING_TX_MERGE_EN_REG		0x80054
#define HCLGEVF_RING_TX_TAIL_REG		0x80058
#define HCLGEVF_RING_TX_HEAD_REG		0x8005C
#define HCLGEVF_RING_TX_FBD_NUM_REG		0x80060
#define HCLGEVF_RING_TX_OFFSET_REG		0x80064
#define HCLGEVF_RING_TX_EBD_NUM_REG		0x80068
#define HCLGEVF_RING_TX_EBD_OFFSET_REG		0x80070
#define HCLGEVF_RING_TX_BD_ERR_REG		0x80074
#define HCLGEVF_RING_EN_REG			0x80090

/* bar registers for tqp interrupt */
#define HCLGEVF_TQP_INTR_CTRL_REG		0x20000
#define HCLGEVF_TQP_INTR_GL0_REG		0x20100
#define HCLGEVF_TQP_INTR_GL1_REG		0x20200
#define HCLGEVF_TQP_INTR_GL2_REG		0x20300
#define HCLGEVF_TQP_INTR_RL_REG			0x20900

/* Vector0 interrupt CMDQ event source register (RW) */
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG	0x27100
/* CMDQ register bits for RX event (=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B	1
/* RST register bits for RESET event */
#define HCLGEVF_VECTOR0_RST_INT_B	2

#define HCLGEVF_TQP_RESET_TRY_TIMES	10
/* Reset related Registers */
#define HCLGEVF_RST_ING			0x20C00
#define HCLGEVF_FUN_RST_ING_BIT		BIT(0)
#define HCLGEVF_GLOBAL_RST_ING_BIT	BIT(5)
#define HCLGEVF_CORE_RST_ING_BIT	BIT(6)
#define HCLGEVF_IMP_RST_ING_BIT		BIT(7)
#define HCLGEVF_RST_ING_BITS \
	(HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
	 HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)

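/*
 * Illustrative sketch (not part of the original header, assumes
 * <linux/io.h> is available): polling code can mask the reset status
 * register with HCLGEVF_RST_ING_BITS to see whether a function, global,
 * core or IMP reset is still in progress. The helper name is hypothetical.
 */
static inline bool hclgevf_example_reset_ongoing(void __iomem *io_base)
{
	return !!(readl(io_base + HCLGEVF_RST_ING) & HCLGEVF_RST_ING_BITS);
}
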
#define HCLGEVF_RSS_IND_TBL_SIZE		512
#define HCLGEVF_RSS_SET_BITMAP_MSK	0xffff
#define HCLGEVF_RSS_KEY_SIZE		40
#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ	0
#define HCLGEVF_RSS_HASH_ALGO_SIMPLE	1
#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC	2
#define HCLGEVF_RSS_HASH_ALGO_MASK	0xf
#define HCLGEVF_RSS_CFG_TBL_NUM \
	(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
#define HCLGEVF_RSS_INPUT_TUPLE_OTHER	GENMASK(3, 0)
#define HCLGEVF_RSS_INPUT_TUPLE_SCTP	GENMASK(4, 0)
#define HCLGEVF_D_PORT_BIT		BIT(0)
#define HCLGEVF_S_PORT_BIT		BIT(1)
#define HCLGEVF_D_IP_BIT		BIT(2)
#define HCLGEVF_S_IP_BIT		BIT(3)
#define HCLGEVF_V_TAG_BIT		BIT(4)

enum hclgevf_evt_cause {
	HCLGEVF_VECTOR0_EVENT_RST,
	HCLGEVF_VECTOR0_EVENT_MBX,
	HCLGEVF_VECTOR0_EVENT_OTHER,
};

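/*
 * Illustrative sketch (not part of the original header, assumes
 * <linux/io.h> is available): how the vector0 CMDQ source register bits
 * defined above might be mapped onto the event causes. The helper name is
 * hypothetical and acking/clearing of the source bits is not shown.
 */
static inline enum hclgevf_evt_cause
hclgevf_example_check_evt_cause(void __iomem *io_base)
{
	u32 cmdq_src = readl(io_base + HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	if (cmdq_src & BIT(HCLGEVF_VECTOR0_RST_INT_B))
		return HCLGEVF_VECTOR0_EVENT_RST;
	if (cmdq_src & BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B))
		return HCLGEVF_VECTOR0_EVENT_MBX;
	return HCLGEVF_VECTOR0_EVENT_OTHER;
}
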
/* states of hclgevf device & tasks */
enum hclgevf_states {
	/* device states */
	HCLGEVF_STATE_DOWN,
	HCLGEVF_STATE_DISABLED,
	HCLGEVF_STATE_IRQ_INITED,
	/* task states */
	HCLGEVF_STATE_SERVICE_SCHED,
	HCLGEVF_STATE_RST_SERVICE_SCHED,
	HCLGEVF_STATE_RST_HANDLING,
	HCLGEVF_STATE_MBX_SERVICE_SCHED,
	HCLGEVF_STATE_MBX_HANDLING,
	HCLGEVF_STATE_CMD_DISABLE,
};

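/*
 * Illustrative sketch (not part of the original header, assumes the kernel
 * bitops are available): the state bits above are intended for use with
 * bit operations on the hclgevf_dev::state word, e.g. to check whether
 * command queue access has been disabled before touching hardware. The
 * helper name is hypothetical and it takes the state word directly since
 * struct hclgevf_dev is defined further down.
 */
static inline bool hclgevf_example_cmd_disabled(unsigned long *state)
{
	return test_bit(HCLGEVF_STATE_CMD_DISABLE, state);
}
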
#define HCLGEVF_MPF_ENBALE 1

struct hclgevf_mac {
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	int link;
	u8 duplex;
	u32 speed;
	u64 supported;
	u64 advertising;
};

struct hclgevf_hw {
	void __iomem *io_base;
	int num_vec;
	struct hclgevf_cmq cmq;
	struct hclgevf_mac mac;
	void *hdev; /* hclgevf device it is part of */
};

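/*
 * Illustrative sketch (not part of the original header, assumes
 * <linux/io.h> is available): MMIO accesses to the register offsets
 * defined earlier go through hclgevf_hw::io_base. The helper name is
 * hypothetical.
 */
static inline u32 hclgevf_example_read_reg(struct hclgevf_hw *hw, u32 reg)
{
	return readl(hw->io_base + reg);
}
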
/* TQP stats */
struct hlcgevf_tqp_stats {
	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};

struct hclgevf_tqp {
	struct device *dev;	/* device for DMA mapping */
	struct hnae3_queue q;
	struct hlcgevf_tqp_stats tqp_stats;
	u16 index;		/* global index in a NIC controller */

	bool alloced;
};

struct hclgevf_cfg {
	u8 vmdq_vport_num;
	u8 tc_num;
	u16 tqp_desc_num;
	u16 rx_buf_len;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u32 numa_node_map;
};

struct hclgevf_rss_tuple_cfg {
	u8 ipv4_tcp_en;
	u8 ipv4_udp_en;
	u8 ipv4_sctp_en;
	u8 ipv4_fragment_en;
	u8 ipv6_tcp_en;
	u8 ipv6_udp_en;
	u8 ipv6_sctp_en;
	u8 ipv6_fragment_en;
};

struct hclgevf_rss_cfg {
	u8  rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
	u32 hash_algo;
	u32 rss_size;
	u8 hw_tc_map;
	u8  rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
	struct hclgevf_rss_tuple_cfg rss_tuple_sets;
};

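/*
 * Illustrative sketch (not part of the original header): the shadow
 * indirection table is typically initialised by spreading the RSS queues
 * across its HCLGEVF_RSS_IND_TBL_SIZE entries. This assumes rss_size has
 * already been set, and the helper name is hypothetical.
 */
static inline void hclgevf_example_init_ind_tbl(struct hclgevf_rss_cfg *rss_cfg)
{
	u16 i;

	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
}
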
struct hclgevf_misc_vector {
	u8 __iomem *addr;
	int vector_irq;
};

struct hclgevf_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclgevf_hw hw;
	struct hclgevf_misc_vector misc_vector;
	struct hclgevf_rss_cfg rss_cfg;
	unsigned long state;
	unsigned long flr_state;
	unsigned long default_reset_request;
	unsigned long last_reset_time;
	enum hnae3_reset_type reset_level;
	unsigned long reset_pending;
	enum hnae3_reset_type reset_type;

#define HCLGEVF_RESET_REQUESTED		0
#define HCLGEVF_RESET_PENDING		1
	unsigned long reset_state;	/* requested, pending */
	unsigned long reset_count;	/* number of resets that have been done */
	u32 reset_attempts;

	u32 fw_version;
	u16 num_tqps;		/* num task queue pairs of this VF */

	u16 alloc_rss_size;	/* allocated RSS task queue */
	u16 rss_size_max;	/* HW defined max RSS task queue */

	u16 num_alloc_vport;	/* num vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_tx_desc;	/* desc num of each tx queue */
	u16 num_rx_desc;	/* desc num of each rx queue */
	u8 hw_tc_map;

	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u16 num_roce_msix;	/* Num of roce vectors for this VF */
	u16 roce_base_msix_offset;
	int roce_base_vector;
	u32 base_msi_vector;
	u16 *vector_status;
	int *vector_irq;

	bool mbx_event_pending;
	struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
	struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */

	struct timer_list service_timer;
	struct timer_list keep_alive_timer;
	struct work_struct service_task;
	struct work_struct keep_alive_task;
	struct work_struct rst_service_task;
	struct work_struct mbx_service_task;

	struct hclgevf_tqp *htqp;

	struct hnae3_handle nic;
	struct hnae3_handle roce;

	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;
	u32 flag;
};

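/*
 * Illustrative sketch (not part of the original header, assumes the kernel
 * bitops are available): the HCLGEVF_RESET_REQUESTED/HCLGEVF_RESET_PENDING
 * bits are meant for the reset_state word above; a reset request might be
 * recorded like this before the reset service task is scheduled. The
 * helper name is hypothetical.
 */
static inline void hclgevf_example_mark_reset_requested(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
}
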
static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
{
	return !!hdev->reset_pending;
}

int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
			 const u8 *msg_data, u8 msg_len, bool need_resp,
			 u8 *resp_data, u16 resp_len);
void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev);

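/*
 * Illustrative sketch (not part of the original header): a minimal mailbox
 * request to the PF using the helper declared above.
 * HCLGE_MBX_GET_LINK_STATUS is assumed to be one of the request codes from
 * hclge_mbx.h; the wrapper name is hypothetical and no response data is
 * collected.
 */
static inline int hclgevf_example_request_link_status(struct hclgevf_dev *hdev)
{
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0,
				    NULL, 0, false, NULL, 0);
}
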
void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex);
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
#endif