/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2016-2017 Hisilicon Limited. */

#ifndef __HCLGEVF_MAIN_H
#define __HCLGEVF_MAIN_H
#include <linux/fs.h>
#include <linux/types.h>
#include "hclge_mbx.h"
#include "hclgevf_cmd.h"
#include "hnae3.h"

#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"

#define HCLGEVF_ROCEE_VECTOR_NUM	0
#define HCLGEVF_MISC_VECTOR_NUM		0

#define HCLGEVF_INVALID_VPORT		0xffff

/* The actual number depends on how many VFs the physical function
 * has created, but the maximum number of vectors per VF is
 * {VFn(1-32), VECTn(32 + 1)}.
 */
#define HCLGEVF_MAX_VF_VECTOR_NUM	(32 + 1)

#define HCLGEVF_VECTOR_REG_BASE		0x20000
#define HCLGEVF_MISC_VECTOR_REG_BASE	0x20400
#define HCLGEVF_VECTOR_REG_OFFSET	0x4
#define HCLGEVF_VECTOR_VF_OFFSET		0x100000

/* Vector0 interrupt CMDQ event source register (RW) */
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG	0x27100
/* CMDQ register bit for RX event (= MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B	1

#define HCLGEVF_TQP_RESET_TRY_TIMES	10
/* Reset related registers */
#define HCLGEVF_FUN_RST_ING		0x20C00
#define HCLGEVF_FUN_RST_ING_B		0

#define HCLGEVF_RSS_IND_TBL_SIZE		512
#define HCLGEVF_RSS_SET_BITMAP_MSK	0xffff
#define HCLGEVF_RSS_KEY_SIZE		40
#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ	0
#define HCLGEVF_RSS_HASH_ALGO_SIMPLE	1
#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC	2
#define HCLGEVF_RSS_HASH_ALGO_MASK	0xf
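/* Number of config commands needed to program the whole 512-entry
 * indirection table, which is written HCLGEVF_RSS_CFG_TBL_SIZE
 * entries at a time (the entries-per-command value is expected to
 * come from hclgevf_cmd.h).
 */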
#define HCLGEVF_RSS_CFG_TBL_NUM \
	(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)

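/* Multicast table (MTA) used for multicast MAC filtering; the type
 * select value chooses how a MAC address is mapped to a table index.
 */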
#define HCLGEVF_MTA_TBL_SIZE		4096
#define HCLGEVF_MTA_TYPE_SEL_MAX	4

/* states of hclgevf device & tasks */
enum hclgevf_states {
	/* device states */
	HCLGEVF_STATE_DOWN,
	HCLGEVF_STATE_DISABLED,
	/* task states */
	HCLGEVF_STATE_SERVICE_SCHED,
	HCLGEVF_STATE_RST_SERVICE_SCHED,
	HCLGEVF_STATE_RST_HANDLING,
	HCLGEVF_STATE_MBX_SERVICE_SCHED,
	HCLGEVF_STATE_MBX_HANDLING,
};

#define HCLGEVF_MPF_ENBALE 1

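/* current MAC address, link state, speed and duplex of this VF */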
struct hclgevf_mac {
	u8 mac_addr[ETH_ALEN];
	int link;
	u8 duplex;
	u32 speed;
};

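/* hardware access context: mapped register space, number of interrupt
 * vectors, command queue and MAC state for this VF
 */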
struct hclgevf_hw {
	void __iomem *io_base;
	int num_vec;
	struct hclgevf_cmq cmq;
	struct hclgevf_mac mac;
	void *hdev; /* hclgevf device it is part of */
};

/* TQP stats */
struct hlcgevf_tqp_stats {
	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};

struct hclgevf_tqp {
	struct device *dev;	/* device for DMA mapping */
	struct hnae3_queue q;
	struct hlcgevf_tqp_stats tqp_stats;
	u16 index;		/* global index in a NIC controller */

	bool alloced;
};

struct hclgevf_cfg {
	u8 vmdq_vport_num;
	u8 tc_num;
	u16 tqp_desc_num;
	u16 rx_buf_len;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u32 numa_node_map;
};

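/* software copy of this VF's RSS configuration: hash key, hash
 * algorithm, queue count and the shadow indirection table
 */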
struct hclgevf_rss_cfg {
	u8  rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
	u32 hash_algo;
	u32 rss_size;
	u8 hw_tc_map;
	u8  rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
};

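/* misc (vector 0) interrupt resources; this vector services command
 * queue RX, i.e. mailbox, events
 */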
struct hclgevf_misc_vector {
	u8 __iomem *addr;
	int vector_irq;
};

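/* per-VF driver private structure, one instance per VF PCI device */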
struct hclgevf_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclgevf_hw hw;
	struct hclgevf_misc_vector misc_vector;
	struct hclgevf_rss_cfg rss_cfg;
	unsigned long state;

#define HCLGEVF_RESET_REQUESTED		0
#define HCLGEVF_RESET_PENDING		1
	unsigned long reset_state;	/* requested, pending */
	u32 reset_attempts;

	u32 fw_version;
	u16 num_tqps;		/* num of task queue pairs of this VF */

	u16 alloc_rss_size;	/* allocated RSS task queues */
	u16 rss_size_max;	/* HW defined max RSS task queues */

	u16 num_alloc_vport;	/* num of vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_desc;
	u8 hw_tc_map;

	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u32 base_msi_vector;
	u16 *vector_status;
	int *vector_irq;

	bool accept_mta_mc; /* whether to accept multicast via the MTA filter */
	u8 mta_mac_sel_type;
	bool mbx_event_pending;
	struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
	struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */

	struct timer_list service_timer;
	struct work_struct service_task;
	struct work_struct rst_service_task;
	struct work_struct mbx_service_task;

	struct hclgevf_tqp *htqp;

	struct hnae3_handle nic;
	struct hnae3_handle roce;

	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;
	u32 flag;
};

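/* true when a VF reset (respectively a full VF reset) is currently
 * being handled by this device
 */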
static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev)
{
	return (hdev &&
		(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
		(hdev->nic.reset_level == HNAE3_VF_RESET));
}

static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev)
{
	return (hdev &&
		(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
		(hdev->nic.reset_level == HNAE3_VF_FULL_RESET));
}

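/* Send a mailbox request from the VF to the PF. @code/@subcode select
 * the request (see hclge_mbx.h) and @msg_data/@msg_len carry its
 * payload; when @need_resp is set, the call waits for the PF's reply
 * and copies up to @resp_len bytes of it into @resp_data. Returns 0 on
 * success or a negative error code.
 */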
int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
			 const u8 *msg_data, u8 msg_len, bool need_resp,
			 u8 *resp_data, u16 resp_len);
void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev);

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex);
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
#endif