// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_regs.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"
#include "hclge_comm_cmd.h"

#define HCLGE_NAME			"hclge"

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

#define HCLGE_LINK_STATUS_MS	10
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
static void hclge_update_fec_stats(struct hclge_dev *hdev);
static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
				      int wait_cnt);
static int hclge_update_port_info(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"External Loopback test",
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
	{"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
	{"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
	{"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
	{"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
	{"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
	{"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
	{"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
	{"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
	{"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
	{"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
	{"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
	{"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
	{"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
	{"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
	{"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
	{"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
	{"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
	{"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6 },
	{ IP_FRAGEMENT, 1 },
	{ ROCE_TYPE, 1 },
	{ NEXT_KEY, 5 },
	{ VLAN_NUMBER, 2 },
	{ SRC_VPORT, 12 },
	{ DST_VPORT, 12 },
	{ TUNNEL_PACKET, 1 },
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.src_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
	{ INNER_IP_TOS, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num : the number of descriptors to be sent
 *
 * This is the main send command for command queue, it
 * sends the queue, cleans the queue, etc
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}

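/* Read the MAC statistics using the fixed 21-descriptor query. This is
 * the fallback path for firmware that cannot report its statistics
 * register count; newer firmware is handled by
 * hclge_mac_update_stats_complete() below.
 */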
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	u32 data_size;
	int ret;
	u32 i;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	/* The first desc has a 64-bit header, so the data size needs to
	 * be reduced by 1.
	 */
	data_size = sizeof(desc) / (sizeof(u64)) - 1;

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	return 0;
}

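/* Read the MAC statistics using the variable-length query: the
 * descriptor count is derived from the register count the firmware
 * reported in dev_specs.mac_stats_num.
 */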
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
#define HCLGE_REG_NUM_PER_DESC 4

	u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 data_size;
	u32 desc_num;
	int ret;
	u32 i;

	/* The first desc has a 64-bit header, so it needs to be accounted
	 * for as well.
	 */
	desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	kfree(desc);

	return 0;
}

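/* Ask the firmware how many MAC statistics registers it exposes. */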
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
	struct hclge_desc desc;
	int ret;

	/* Driver needs total register number of both valid registers and
	 * reserved registers, but the old firmware only returns number
	 * of valid registers in device V2. To be compatible with these
	 * devices, driver uses a fixed value.
	 */
	if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		*reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query mac statistic reg number, ret = %d\n",
			ret);
		return ret;
	}

	*reg_num = le32_to_cpu(desc.data[0]);
	if (*reg_num == 0) {
		dev_err(&hdev->pdev->dev,
			"mac statistic reg number is invalid!\n");
		return -ENODATA;
	}

	return 0;
}

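/* Pick the statistics path based on whether the firmware reported a
 * MAC statistics register count.
 */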
int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	/* The firmware supports the new statistics acquisition method */
	if (hdev->ae_dev->dev_specs.mac_stats_num)
		return hclge_mac_update_stats_complete(hdev);
	else
		return hclge_mac_update_stats_defective(hdev);
}

static int hclge_comm_get_count(struct hclge_dev *hdev,
				const struct hclge_comm_stats_str strs[],
				u32 size)
{
	int count = 0;
	u32 i;

	for (i = 0; i < size; i++)
		if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
			count++;

	return count;
}

static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		*buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
		buf++;
	}

	return buf;
}

static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	hclge_update_fec_stats(hdev);

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
		HNAE3_SUPPORT_PHY_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
		HNAE3_SUPPORT_EXTERNAL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 1;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		count += 1;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
		count += 1;
		handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;

		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = hclge_comm_get_count(hdev, g_mac_stats_string,
					     ARRAY_SIZE(g_mac_stats_string)) +
			hclge_comm_tqps_get_sset_count(handle);
	}

	return count;
}


static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
					   size, p);
		p = hclge_comm_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(hdev, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_comm_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

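/* Decode the function status response: record whether this PF is the
 * main PF and extract its MAC id.
 */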
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}

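/* Query the TQP, buffer and MSI-X resources assigned to this PF and
 * cache them in the hclge_dev structure.
 */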
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and RoCE vectors,
		 * and NIC vectors are queued before RoCE vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}

static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	switch (speed_cmd) {
	case HCLGE_FW_MAC_SPEED_10M:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case HCLGE_FW_MAC_SPEED_100M:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case HCLGE_FW_MAC_SPEED_1G:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case HCLGE_FW_MAC_SPEED_10G:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case HCLGE_FW_MAC_SPEED_25G:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case HCLGE_FW_MAC_SPEED_40G:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case HCLGE_FW_MAC_SPEED_50G:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case HCLGE_FW_MAC_SPEED_100G:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case HCLGE_FW_MAC_SPEED_200G:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct hclge_speed_bit_map speed_bit_map[] = {
	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
};

static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
{
	u16 i;

	for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
		if (speed == speed_bit_map[i].speed) {
			*speed_bit = speed_bit_map[i].speed_bit;
			return 0;
		}
	}

	return -EINVAL;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;
	int ret;

	ret = hclge_get_speed_bit(speed, &speed_bit);
	if (ret)
		return ret;

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

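/* Rebuild the ethtool FEC link modes in mac->supported from the
 * fec_ability bitmap.
 */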
static void hclge_update_fec_support(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);

	if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_RS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 mac->supported);
}

static void hclge_convert_setting_sr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
				 link_mode);
}

static void hclge_convert_setting_lr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			link_mode);
}

static void hclge_convert_setting_cr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
				 link_mode);
}

static void hclge_convert_setting_kr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
				 link_mode);
}

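/* Derive the FEC ability from the MAC speed when the firmware did not
 * report one, then refresh the supported FEC link modes.
 */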
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	/* If firmware has reported fec_ability, don't need to convert by speed */
	if (mac->fec_ability)
		goto out;

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
				   BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_100G:
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_200G:
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_LLRS);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}

out:
	hclge_update_fec_support(mac);
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(speed_ability, mac->supported);
	hclge_convert_setting_lr(speed_ability, mac->supported);
	hclge_convert_setting_cr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to supporting all speeds for a GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}

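/* Unpack the configuration parameters read from flash (two descriptors)
 * into struct hclge_cfg; note the MAC address is split across two
 * parameter words and reassembled here.
 */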
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define HCLGE_TX_SPARE_SIZE_UNIT	4096
#define SPEED_ABILITY_EXT_SHIFT		8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power
	 * of 2 rather than the value itself, which is more flexible for
	 * future changes and expansions. It does not make sense for the
	 * PF's field to be 0 while the VF max rss size field
	 * (HCLGE_CFG_RSS_SIZE_S) is set, so in that case PF and VF share
	 * the same max rss size field: HCLGE_CFG_RSS_SIZE_S.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;

	/* The unit of the tx spare buffer size queried from configuration
	 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
	 * needed here.
	 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Length must be in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM	8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
	ae_dev->dev_specs.tnl_num = 0;
}

static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
	ae_dev->dev_specs.tnl_num = req1->tnl_num;
}

static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
	if (!dev_specs->umv_size)
		dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
{
	u32 reg_num = 0;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &reg_num);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
	return 0;
}

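/* Query the device specifications from the firmware; devices older
 * than V3 cannot report them, so defaults are used instead, and any
 * field the firmware leaves at zero is backfilled with its default.
 */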
static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	ret = hclge_query_mac_stats_num(hdev);
	if (ret)
		return ret;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* the minimal number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

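/* Sanitize the TC configuration: clamp tc_max, disable PFC when DCB is
 * unsupported, and default to a single contiguous TC.
 */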
static void hclge_init_tc_config(struct hclge_dev *hdev)
{
	unsigned int i;

	if (hdev->tc_max > HNAE3_MAX_TC ||
	    hdev->tc_max < 1) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_cfg cfg;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	if (cfg.umv_space)
		hdev->wanted_umv_size = cfg.umv_space;
	else
		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
	hdev->gro_en = true;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}
	hdev->hw.mac.req_speed = hdev->hw.mac.speed;
	hdev->hw.mac.req_autoneg = AUTONEG_ENABLE;
	hdev->hw.mac.req_duplex = DUPLEX_FULL;

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	hclge_init_tc_config(hdev);
	hclge_init_kdump_kernel_config(hdev);

	return ret;
}

hclge_config_tso(struct hclge_dev * hdev,u16 tso_mss_min,u16 tso_mss_max)1543 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1544 u16 tso_mss_max)
1545 {
1546 struct hclge_cfg_tso_status_cmd *req;
1547 struct hclge_desc desc;
1548
1549 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1550
1551 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1552 req->tso_mss_min = cpu_to_le16(tso_mss_min);
1553 req->tso_mss_max = cpu_to_le16(tso_mss_max);
1554
1555 return hclge_cmd_send(&hdev->hw, &desc, 1);
1556 }
1557
hclge_config_gro(struct hclge_dev * hdev)1558 static int hclge_config_gro(struct hclge_dev *hdev)
1559 {
1560 struct hclge_cfg_gro_status_cmd *req;
1561 struct hclge_desc desc;
1562 int ret;
1563
1564 if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
1565 return 0;
1566
1567 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1568 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1569
1570 req->gro_en = hdev->gro_en ? 1 : 0;
1571
1572 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1573 if (ret)
1574 dev_err(&hdev->pdev->dev,
1575 "GRO hardware config cmd failed, ret = %d\n", ret);
1576
1577 return ret;
1578 }
1579
hclge_alloc_tqps(struct hclge_dev * hdev)1580 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1581 {
1582 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1583 struct hclge_comm_tqp *tqp;
1584 int i;
1585
1586 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1587 sizeof(struct hclge_comm_tqp), GFP_KERNEL);
1588 if (!hdev->htqp)
1589 return -ENOMEM;
1590
1591 tqp = hdev->htqp;
1592
1593 for (i = 0; i < hdev->num_tqps; i++) {
1594 tqp->dev = &hdev->pdev->dev;
1595 tqp->index = i;
1596
1597 tqp->q.ae_algo = &ae_algo;
1598 tqp->q.buf_size = hdev->rx_buf_len;
1599 tqp->q.tx_desc_num = hdev->num_tx_desc;
1600 tqp->q.rx_desc_num = hdev->num_rx_desc;
1601
1602 /* need an extended offset to configure queues >=
1603 * HCLGE_TQP_MAX_SIZE_DEV_V2
1604 */
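		/* Illustration (layout inferred from the offsets used
		 * below, not from a register manual): queue 0 lives at
		 * io_base + HCLGE_TQP_REG_OFFSET, and the first queue at
		 * or above HCLGE_TQP_MAX_SIZE_DEV_V2 additionally skips
		 * HCLGE_TQP_EXT_REG_OFFSET before the per-queue
		 * HCLGE_TQP_REG_SIZE stride is applied.
		 */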
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		/* when the device supports tx push and has device memory,
		 * the queue can execute push mode or doorbell mode on
		 * device memory.
		 */
		if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
			tqp->q.mem_base = hdev->hw.hw.mem_base +
					  HCLGE_TQP_MEM_OFFSET(hdev, i);

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure a one-to-one mapping between irq vector and queue by default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
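	/* Worked example (numbers are illustrative only): with
	 * num_nic_msi = 17, num_tc = 4 and 32 queues assigned, the cap
	 * above is (17 - 1) / 4 = 4, so rss_size becomes 4 even though
	 * 32 / 4 = 8 queues per TC would otherwise be available.
	 */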

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_comm_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
		    MAX_NUMNODES);
	nic->kinfo.io_base = hdev->hw.hw.io_base;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
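	/* Worked example (illustrative): 67 TQPs shared across 1 PF and
	 * 4 VFs gives tqp_per_vport = 67 / 5 = 13, and the main vport
	 * additionally takes the remainder: 13 + 67 % 5 = 15.
	 */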

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->port_base_vlan_cfg.tbl_sta = true;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		vport->req_vlan_fltr_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
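/* Illustration: a 32 KiB (0x8000 byte) per-TC buffer is encoded as
 * 0x8000 >> 7 = 0x100 units, with bit 15 set to enable the update.
 */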
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
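/* hclge_is_rx_buf_ok() below checks whether rx_all can hold the per-TC
 * private buffers plus a shared buffer. A sketch of the arithmetic
 * (numbers are illustrative, not taken from any real configuration):
 * with mps = 1500 rounded up to aligned_mps = 1536 and DCB supported,
 * the shared minimum is 2 * 1536 + dv_buf_size, the per-TC variant is
 * tc_num * 1536 + 1536, and the larger of the two, rounded up to
 * HCLGE_BUF_SIZE_UNIT, must still fit after the private buffers.
 */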
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
					     HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last TC be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last TC be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TCs with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800
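/* The per-TC floor computed below is dv_buf_size plus a fixed 0x3C00
 * byte compensation and five half-MPS units; reading these constants
 * as headroom compensation is an editorial assumption, not something
 * stated by the hardware documentation quoted here.
 */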

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
		      COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
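	/* The strategies below are tried in order, from most to least
	 * buffer-hungry: private buffers only, all TCs with large then
	 * small watermarks, then dropping private buffers first for
	 * non-PFC TCs and finally for PFC TCs until everything fits.
	 */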
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

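	/* Pipeline: size the per-TC TX buffers, commit them to hardware,
	 * size the RX private/shared buffers, commit those, then (on DCB
	 * capable devices) program per-TC waterlines and thresholds
	 * before the common waterline.
	 */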
	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
		return -EINVAL;

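	/* RoCE vectors sit after the NIC vectors in the MSI table, hence
	 * the base below and the budget check above.
	 */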
	roce->rinfo.base_vector = hdev->num_nic_msi;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
	roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
		    MAX_NUMNODES);

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
					hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
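	/* Only 10M and 100M links can run half duplex; force full duplex
	 * for every other speed.
	 */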
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
	{HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
	{HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
	{HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
	{HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
	{HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
	{HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
	{HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
	{HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
	{HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
};

static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
{
	u16 i;

	for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
		if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
			*speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
			return 0;
		}
	}

	return -EINVAL;
}

static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex, u8 lane_num)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	u32 speed_fw;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	if (duplex)
		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);

	ret = hclge_convert_to_fw_speed(speed, &speed_fw);
	if (ret) {
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return ret;
	}

	hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S,
			speed_fw);
	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);
	req->lane_num = lane_num;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (!mac->support_autoneg && mac->speed == speed &&
	    mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
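	/* lane_num == 0 means the caller left the lane count unspecified,
	 * so only record it when a real value was passed in; this matches
	 * the early-return check above.
	 */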
	if (lane_num)
		hdev->hw.mac.lane_num = lane_num;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex, u8 lane_num)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	if (enable)
		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.support_autoneg) {
		if (enable) {
			dev_err(&hdev->pdev->dev,
				"autoneg is not supported by current port\n");
			return -EOPNOTSUPP;
		} else {
			return 0;
		}
	}

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}

static int hclge_restart_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;
	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
		return hclge_set_autoneg_en(hdev, !halt);

	return 0;
}

static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
					struct hclge_desc *desc, u32 desc_len)
{
	u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2;
	u32 desc_index = 0;
	u32 data_index = 0;
	u32 i;

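	/* per_lanes[] is assumed to alias corrected counters in its first
	 * HCLGE_FEC_STATS_MAX_LANES slots and uncorrectable counters in
	 * the rest (a union declared in hclge_main.h), so lane_size walks
	 * both halves in one pass.
	 */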
	for (i = 0; i < lane_size; i++) {
		if (data_index >= HCLGE_DESC_DATA_LEN) {
			desc_index++;
			data_index = 0;
		}

		if (desc_index >= desc_len)
			return;

		hdev->fec_stats.per_lanes[i] +=
			le32_to_cpu(desc[desc_index].data[data_index]);
		data_index++;
	}
}

static void hclge_parse_fec_stats(struct hclge_dev *hdev,
				  struct hclge_desc *desc, u32 desc_len)
{
	struct hclge_query_fec_stats_cmd *req;

	req = (struct hclge_query_fec_stats_cmd *)desc[0].data;

	hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
	hdev->fec_stats.rs_corr_blocks +=
		le32_to_cpu(req->rs_fec_corr_blocks);
	hdev->fec_stats.rs_uncorr_blocks +=
		le32_to_cpu(req->rs_fec_uncorr_blocks);
	hdev->fec_stats.rs_error_blocks +=
		le32_to_cpu(req->rs_fec_error_blocks);
	hdev->fec_stats.base_r_corr_blocks +=
		le32_to_cpu(req->base_r_fec_corr_blocks);
	hdev->fec_stats.base_r_uncorr_blocks +=
		le32_to_cpu(req->base_r_fec_uncorr_blocks);

	hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
}

static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM];
	int ret;
	u32 i;

	for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS,
					   true);
		if (i != (HCLGE_FEC_STATS_CMD_NUM - 1))
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
	if (ret)
		return ret;

	hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);

	return 0;
}

static void hclge_update_fec_stats(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

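	/* Skip when the device lacks FEC stats support; the UPDATING bit
	 * also keeps concurrent updaters from racing on the counters.
	 */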
2754 if (!hnae3_ae_dev_fec_stats_supported(ae_dev) ||
2755 test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
2756 return;
2757
2758 ret = hclge_update_fec_stats_hw(hdev);
2759 if (ret)
2760 dev_err(&hdev->pdev->dev,
2761 "failed to update fec stats, ret = %d\n", ret);
2762
2763 clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
2764 }
2765
hclge_get_fec_stats_total(struct hclge_dev * hdev,struct ethtool_fec_stats * fec_stats)2766 static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
2767 struct ethtool_fec_stats *fec_stats)
2768 {
2769 fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
2770 fec_stats->uncorrectable_blocks.total =
2771 hdev->fec_stats.rs_uncorr_blocks;
2772 }
2773
hclge_get_fec_stats_lanes(struct hclge_dev * hdev,struct ethtool_fec_stats * fec_stats)2774 static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
2775 struct ethtool_fec_stats *fec_stats)
2776 {
2777 u32 i;
2778
2779 if (hdev->fec_stats.base_r_lane_num == 0 ||
2780 hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
2781 dev_err(&hdev->pdev->dev,
2782 "fec stats lane number(%llu) is invalid\n",
2783 hdev->fec_stats.base_r_lane_num);
2784 return;
2785 }
2786
2787 for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
2788 fec_stats->corrected_blocks.lanes[i] =
2789 hdev->fec_stats.base_r_corr_per_lanes[i];
2790 fec_stats->uncorrectable_blocks.lanes[i] =
2791 hdev->fec_stats.base_r_uncorr_per_lanes[i];
2792 }
2793 }
2794
hclge_comm_get_fec_stats(struct hclge_dev * hdev,struct ethtool_fec_stats * fec_stats)2795 static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
2796 struct ethtool_fec_stats *fec_stats)
2797 {
2798 u32 fec_mode = hdev->hw.mac.fec_mode;
2799
2800 switch (fec_mode) {
2801 case BIT(HNAE3_FEC_RS):
2802 case BIT(HNAE3_FEC_LLRS):
2803 hclge_get_fec_stats_total(hdev, fec_stats);
2804 break;
2805 case BIT(HNAE3_FEC_BASER):
2806 hclge_get_fec_stats_lanes(hdev, fec_stats);
2807 break;
2808 default:
2809 dev_err(&hdev->pdev->dev,
2810 "fec stats is not supported by current fec mode(0x%x)\n",
2811 fec_mode);
2812 break;
2813 }
2814 }
2815
hclge_get_fec_stats(struct hnae3_handle * handle,struct ethtool_fec_stats * fec_stats)2816 static void hclge_get_fec_stats(struct hnae3_handle *handle,
2817 struct ethtool_fec_stats *fec_stats)
2818 {
2819 struct hclge_vport *vport = hclge_get_vport(handle);
2820 struct hclge_dev *hdev = vport->back;
2821 u32 fec_mode = hdev->hw.mac.fec_mode;
2822
2823 if (fec_mode == BIT(HNAE3_FEC_NONE) ||
2824 fec_mode == BIT(HNAE3_FEC_AUTO) ||
2825 fec_mode == BIT(HNAE3_FEC_USER_DEF))
2826 return;
2827
2828 hclge_update_fec_stats(hdev);
2829
2830 hclge_comm_get_fec_stats(hdev, fec_stats);
2831 }
2832
hclge_set_fec_hw(struct hclge_dev * hdev,u32 fec_mode)2833 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2834 {
2835 struct hclge_config_fec_cmd *req;
2836 struct hclge_desc desc;
2837 int ret;
2838
2839 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2840
2841 req = (struct hclge_config_fec_cmd *)desc.data;
2842 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2843 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2844 if (fec_mode & BIT(HNAE3_FEC_RS))
2845 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2846 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2847 if (fec_mode & BIT(HNAE3_FEC_LLRS))
2848 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2849 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS);
2850 if (fec_mode & BIT(HNAE3_FEC_BASER))
2851 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2852 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2853
2854 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2855 if (ret)
2856 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2857
2858 return ret;
2859 }
2860
hclge_set_fec(struct hnae3_handle * handle,u32 fec_mode)2861 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2862 {
2863 struct hclge_vport *vport = hclge_get_vport(handle);
2864 struct hclge_dev *hdev = vport->back;
2865 struct hclge_mac *mac = &hdev->hw.mac;
2866 int ret;
2867
2868 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2869 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2870 return -EINVAL;
2871 }
2872
2873 ret = hclge_set_fec_hw(hdev, fec_mode);
2874 if (ret)
2875 return ret;
2876
2877 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2878 return 0;
2879 }
2880
hclge_get_fec(struct hnae3_handle * handle,u8 * fec_ability,u8 * fec_mode)2881 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2882 u8 *fec_mode)
2883 {
2884 struct hclge_vport *vport = hclge_get_vport(handle);
2885 struct hclge_dev *hdev = vport->back;
2886 struct hclge_mac *mac = &hdev->hw.mac;
2887
2888 if (fec_ability)
2889 *fec_ability = mac->fec_ability;
2890 if (fec_mode)
2891 *fec_mode = mac->fec_mode;
2892 }
2893
hclge_mac_init(struct hclge_dev * hdev)2894 static int hclge_mac_init(struct hclge_dev *hdev)
2895 {
2896 struct hclge_mac *mac = &hdev->hw.mac;
2897 int ret;
2898
2899 hdev->support_sfp_query = true;
2900
2901 if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2902 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2903
2904 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2905 hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
2906 if (ret)
2907 return ret;
2908
2909 if (hdev->hw.mac.support_autoneg) {
2910 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2911 if (ret)
2912 return ret;
2913 }
2914
2915 mac->link = 0;
2916
2917 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2918 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2919 if (ret)
2920 return ret;
2921 }
2922
2923 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2924 if (ret) {
2925 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2926 return ret;
2927 }
2928
2929 ret = hclge_set_default_loopback(hdev);
2930 if (ret)
2931 return ret;
2932
2933 ret = hclge_buffer_alloc(hdev);
2934 if (ret)
2935 dev_err(&hdev->pdev->dev,
2936 "allocate buffer fail, ret=%d\n", ret);
2937
2938 return ret;
2939 }
2940
hclge_mbx_task_schedule(struct hclge_dev * hdev)2941 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2942 {
2943 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2944 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
2945 hdev->last_mbx_scheduled = jiffies;
2946 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2947 }
2948 }
2949
hclge_reset_task_schedule(struct hclge_dev * hdev)2950 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2951 {
2952 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2953 test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
2954 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
2955 hdev->last_rst_scheduled = jiffies;
2956 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2957 }
2958 }
2959
hclge_errhand_task_schedule(struct hclge_dev * hdev)2960 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2961 {
2962 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2963 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2964 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2965 }
2966
hclge_task_schedule(struct hclge_dev * hdev,unsigned long delay_time)2967 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2968 {
2969 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2970 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2971 mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
2972 }
2973
hclge_get_mac_link_status(struct hclge_dev * hdev,int * link_status)2974 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2975 {
2976 struct hclge_link_status_cmd *req;
2977 struct hclge_desc desc;
2978 int ret;
2979
2980 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2981 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2982 if (ret) {
2983 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2984 ret);
2985 return ret;
2986 }
2987
2988 req = (struct hclge_link_status_cmd *)desc.data;
2989 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2990 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2991
2992 return 0;
2993 }
2994
hclge_get_mac_phy_link(struct hclge_dev * hdev,int * link_status)2995 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2996 {
2997 struct phy_device *phydev = hdev->hw.mac.phydev;
2998
2999 *link_status = HCLGE_LINK_STATUS_DOWN;
3000
3001 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
3002 return 0;
3003
3004 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
3005 return 0;
3006
3007 return hclge_get_mac_link_status(hdev, link_status);
3008 }
3009
hclge_push_link_status(struct hclge_dev * hdev)3010 static void hclge_push_link_status(struct hclge_dev *hdev)
3011 {
3012 struct hclge_vport *vport;
3013 int ret;
3014 u16 i;
3015
3016 for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
3017 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
3018
3019 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
3020 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
3021 continue;
3022
3023 ret = hclge_push_vf_link_status(vport);
3024 if (ret) {
3025 dev_err(&hdev->pdev->dev,
3026 "failed to push link status to vf%u, ret = %d\n",
3027 i, ret);
3028 }
3029 }
3030 }
3031
hclge_update_link_status(struct hclge_dev * hdev)3032 static void hclge_update_link_status(struct hclge_dev *hdev)
3033 {
3034 struct hnae3_handle *handle = &hdev->vport[0].nic;
3035 struct hnae3_client *client = hdev->nic_client;
3036 int state;
3037 int ret;
3038
3039 if (!client)
3040 return;
3041
3042 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
3043 return;
3044
3045 ret = hclge_get_mac_phy_link(hdev, &state);
3046 if (ret) {
3047 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3048 return;
3049 }
3050
3051 if (state != hdev->hw.mac.link) {
3052 hdev->hw.mac.link = state;
3053 if (state == HCLGE_LINK_STATUS_UP)
3054 hclge_update_port_info(hdev);
3055
3056 client->ops->link_status_change(handle, state);
3057 hclge_config_mac_tnl_int(hdev, state);
3058
3059 if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) {
3060 struct hnae3_handle *rhandle = &hdev->vport[0].roce;
3061 struct hnae3_client *rclient = hdev->roce_client;
3062
3063 if (rclient && rclient->ops->link_status_change)
3064 rclient->ops->link_status_change(rhandle,
3065 state);
3066 }
3067
3068 hclge_push_link_status(hdev);
3069 }
3070
3071 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3072 }
3073
hclge_update_speed_advertising(struct hclge_mac * mac)3074 static void hclge_update_speed_advertising(struct hclge_mac *mac)
3075 {
3076 u32 speed_ability;
3077
3078 if (hclge_get_speed_bit(mac->speed, &speed_ability))
3079 return;
3080
3081 switch (mac->module_type) {
3082 case HNAE3_MODULE_TYPE_FIBRE_LR:
3083 hclge_convert_setting_lr(speed_ability, mac->advertising);
3084 break;
3085 case HNAE3_MODULE_TYPE_FIBRE_SR:
3086 case HNAE3_MODULE_TYPE_AOC:
3087 hclge_convert_setting_sr(speed_ability, mac->advertising);
3088 break;
3089 case HNAE3_MODULE_TYPE_CR:
3090 hclge_convert_setting_cr(speed_ability, mac->advertising);
3091 break;
3092 case HNAE3_MODULE_TYPE_KR:
3093 hclge_convert_setting_kr(speed_ability, mac->advertising);
3094 break;
3095 default:
3096 break;
3097 }
3098 }
3099
hclge_update_fec_advertising(struct hclge_mac * mac)3100 static void hclge_update_fec_advertising(struct hclge_mac *mac)
3101 {
3102 if (mac->fec_mode & BIT(HNAE3_FEC_RS))
3103 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
3104 mac->advertising);
3105 else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
3106 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
3107 mac->advertising);
3108 else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
3109 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
3110 mac->advertising);
3111 else
3112 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
3113 mac->advertising);
3114 }
3115
hclge_update_pause_advertising(struct hclge_dev * hdev)3116 static void hclge_update_pause_advertising(struct hclge_dev *hdev)
3117 {
3118 struct hclge_mac *mac = &hdev->hw.mac;
3119 bool rx_en, tx_en;
3120
3121 switch (hdev->fc_mode_last_time) {
3122 case HCLGE_FC_RX_PAUSE:
3123 rx_en = true;
3124 tx_en = false;
3125 break;
3126 case HCLGE_FC_TX_PAUSE:
3127 rx_en = false;
3128 tx_en = true;
3129 break;
3130 case HCLGE_FC_FULL:
3131 rx_en = true;
3132 tx_en = true;
3133 break;
3134 default:
3135 rx_en = false;
3136 tx_en = false;
3137 break;
3138 }
3139
3140 linkmode_set_pause(mac->advertising, tx_en, rx_en);
3141 }
3142
hclge_update_advertising(struct hclge_dev * hdev)3143 static void hclge_update_advertising(struct hclge_dev *hdev)
3144 {
3145 struct hclge_mac *mac = &hdev->hw.mac;
3146
3147 linkmode_zero(mac->advertising);
3148 hclge_update_speed_advertising(mac);
3149 hclge_update_fec_advertising(mac);
3150 hclge_update_pause_advertising(hdev);
3151 }
3152
hclge_update_port_capability(struct hclge_dev * hdev,struct hclge_mac * mac)3153 static void hclge_update_port_capability(struct hclge_dev *hdev,
3154 struct hclge_mac *mac)
3155 {
3156 if (hnae3_dev_fec_supported(hdev))
3157 hclge_convert_setting_fec(mac);
3158
3159 /* firmware can not identify back plane type, the media type
3160 * read from configuration can help deal it
3161 */
3162 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
3163 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
3164 mac->module_type = HNAE3_MODULE_TYPE_KR;
3165 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3166 mac->module_type = HNAE3_MODULE_TYPE_TP;
3167
3168 if (mac->support_autoneg) {
3169 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
3170 linkmode_copy(mac->advertising, mac->supported);
3171 } else {
3172 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
3173 mac->supported);
3174 hclge_update_advertising(hdev);
3175 }
3176 }
3177
hclge_get_sfp_speed(struct hclge_dev * hdev,u32 * speed)3178 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
3179 {
3180 struct hclge_sfp_info_cmd *resp;
3181 struct hclge_desc desc;
3182 int ret;
3183
3184 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3185 resp = (struct hclge_sfp_info_cmd *)desc.data;
3186 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3187 if (ret == -EOPNOTSUPP) {
3188 dev_warn(&hdev->pdev->dev,
3189 "IMP do not support get SFP speed %d\n", ret);
3190 return ret;
3191 } else if (ret) {
3192 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3193 return ret;
3194 }
3195
3196 *speed = le32_to_cpu(resp->speed);
3197
3198 return 0;
3199 }
3200
hclge_get_sfp_info(struct hclge_dev * hdev,struct hclge_mac * mac)3201 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3202 {
3203 struct hclge_sfp_info_cmd *resp;
3204 struct hclge_desc desc;
3205 int ret;
3206
3207 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3208 resp = (struct hclge_sfp_info_cmd *)desc.data;
3209
3210 resp->query_type = QUERY_ACTIVE_SPEED;
3211
3212 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3213 if (ret == -EOPNOTSUPP) {
3214 dev_warn(&hdev->pdev->dev,
3215 "IMP does not support get SFP info %d\n", ret);
3216 return ret;
3217 } else if (ret) {
3218 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3219 return ret;
3220 }
3221
3222 /* In some case, mac speed get from IMP may be 0, it shouldn't be
3223 * set to mac->speed.
3224 */
3225 if (!le32_to_cpu(resp->speed))
3226 return 0;
3227
3228 mac->speed = le32_to_cpu(resp->speed);
3229 /* if resp->speed_ability is 0, it means it's an old version
3230 * firmware, do not update these params
3231 */
3232 if (resp->speed_ability) {
3233 mac->module_type = le32_to_cpu(resp->module_type);
3234 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3235 mac->autoneg = resp->autoneg;
3236 mac->support_autoneg = resp->autoneg_ability;
3237 mac->speed_type = QUERY_ACTIVE_SPEED;
3238 mac->lane_num = resp->lane_num;
3239 if (!resp->active_fec)
3240 mac->fec_mode = 0;
3241 else
3242 mac->fec_mode = BIT(resp->active_fec);
3243 mac->fec_ability = resp->fec_ability;
3244 } else {
3245 mac->speed_type = QUERY_SFP_SPEED;
3246 }
3247
3248 return 0;
3249 }
3250
hclge_get_phy_link_ksettings(struct hnae3_handle * handle,struct ethtool_link_ksettings * cmd)3251 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3252 struct ethtool_link_ksettings *cmd)
3253 {
3254 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3255 struct hclge_vport *vport = hclge_get_vport(handle);
3256 struct hclge_phy_link_ksetting_0_cmd *req0;
3257 struct hclge_phy_link_ksetting_1_cmd *req1;
3258 u32 supported, advertising, lp_advertising;
3259 struct hclge_dev *hdev = vport->back;
3260 int ret;
3261
3262 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3263 true);
3264 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
3265 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3266 true);
3267
3268 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3269 if (ret) {
3270 dev_err(&hdev->pdev->dev,
3271 "failed to get phy link ksetting, ret = %d.\n", ret);
3272 return ret;
3273 }
3274
3275 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3276 cmd->base.autoneg = req0->autoneg;
3277 cmd->base.speed = le32_to_cpu(req0->speed);
3278 cmd->base.duplex = req0->duplex;
3279 cmd->base.port = req0->port;
3280 cmd->base.transceiver = req0->transceiver;
3281 cmd->base.phy_address = req0->phy_address;
3282 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3283 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3284 supported = le32_to_cpu(req0->supported);
3285 advertising = le32_to_cpu(req0->advertising);
3286 lp_advertising = le32_to_cpu(req0->lp_advertising);
3287 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3288 supported);
3289 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3290 advertising);
3291 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3292 lp_advertising);
3293
3294 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3295 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3296 cmd->base.master_slave_state = req1->master_slave_state;
3297
3298 return 0;
3299 }
3300
3301 static int
3302 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3303 const struct ethtool_link_ksettings *cmd)
3304 {
3305 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3306 struct hclge_vport *vport = hclge_get_vport(handle);
3307 struct hclge_phy_link_ksetting_0_cmd *req0;
3308 struct hclge_phy_link_ksetting_1_cmd *req1;
3309 struct hclge_dev *hdev = vport->back;
3310 u32 advertising;
3311 int ret;
3312
3313 if (cmd->base.autoneg == AUTONEG_DISABLE &&
3314 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3315 (cmd->base.duplex != DUPLEX_HALF &&
3316 cmd->base.duplex != DUPLEX_FULL)))
3317 return -EINVAL;
3318
3319 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3320 false);
3321 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
3322 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3323 false);
3324
3325 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3326 req0->autoneg = cmd->base.autoneg;
3327 req0->speed = cpu_to_le32(cmd->base.speed);
3328 req0->duplex = cmd->base.duplex;
3329 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3330 cmd->link_modes.advertising);
3331 req0->advertising = cpu_to_le32(advertising);
3332 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3333
3334 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3335 req1->master_slave_cfg = cmd->base.master_slave_cfg;
3336
3337 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3338 if (ret) {
3339 dev_err(&hdev->pdev->dev,
3340 "failed to set phy link ksettings, ret = %d.\n", ret);
3341 return ret;
3342 }
3343
3344 hdev->hw.mac.req_autoneg = cmd->base.autoneg;
3345 hdev->hw.mac.req_speed = cmd->base.speed;
3346 hdev->hw.mac.req_duplex = cmd->base.duplex;
3347 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3348
3349 return 0;
3350 }
3351
3352 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3353 {
3354 struct ethtool_link_ksettings cmd;
3355 int ret;
3356
3357 if (!hnae3_dev_phy_imp_supported(hdev))
3358 return 0;
3359
3360 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3361 if (ret)
3362 return ret;
3363
3364 hdev->hw.mac.autoneg = cmd.base.autoneg;
3365 hdev->hw.mac.speed = cmd.base.speed;
3366 hdev->hw.mac.duplex = cmd.base.duplex;
3367 linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising);
3368
3369 return 0;
3370 }
3371
3372 static int hclge_tp_port_init(struct hclge_dev *hdev)
3373 {
3374 struct ethtool_link_ksettings cmd;
3375
3376 if (!hnae3_dev_phy_imp_supported(hdev))
3377 return 0;
3378
3379 cmd.base.autoneg = hdev->hw.mac.req_autoneg;
3380 cmd.base.speed = hdev->hw.mac.req_speed;
3381 cmd.base.duplex = hdev->hw.mac.req_duplex;
3382 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3383
3384 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3385 }
3386
3387 static int hclge_update_port_info(struct hclge_dev *hdev)
3388 {
3389 struct hclge_mac *mac = &hdev->hw.mac;
3390 int speed;
3391 int ret;
3392
3393 /* get the port info from SFP cmd if not copper port */
3394 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3395 return hclge_update_tp_port_info(hdev);
3396
3397 /* if IMP does not support getting SFP/qSFP info, return directly */
3398 if (!hdev->support_sfp_query)
3399 return 0;
3400
3401 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3402 speed = mac->speed;
3403 ret = hclge_get_sfp_info(hdev, mac);
3404 } else {
3405 speed = HCLGE_MAC_SPEED_UNKNOWN;
3406 ret = hclge_get_sfp_speed(hdev, &speed);
3407 }
3408
3409 if (ret == -EOPNOTSUPP) {
3410 hdev->support_sfp_query = false;
3411 return ret;
3412 } else if (ret) {
3413 return ret;
3414 }
3415
3416 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3417 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3418 hclge_update_port_capability(hdev, mac);
3419 if (mac->speed != speed)
3420 (void)hclge_tm_port_shaper_cfg(hdev);
3421 return 0;
3422 }
3423 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3424 HCLGE_MAC_FULL, mac->lane_num);
3425 } else {
3426 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3427 return 0; /* do nothing if no SFP */
3428
3429 /* must configure full duplex for SFP */
3430 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
3431 }
3432 }
3433
3434 static int hclge_get_status(struct hnae3_handle *handle)
3435 {
3436 struct hclge_vport *vport = hclge_get_vport(handle);
3437 struct hclge_dev *hdev = vport->back;
3438
3439 hclge_update_link_status(hdev);
3440
3441 return hdev->hw.mac.link;
3442 }
3443
3444 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3445 {
3446 if (!pci_num_vf(hdev->pdev)) {
3447 dev_err(&hdev->pdev->dev,
3448 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3449 return NULL;
3450 }
3451
3452 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3453 dev_err(&hdev->pdev->dev,
3454 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3455 vf, pci_num_vf(hdev->pdev));
3456 return NULL;
3457 }
3458
3459 /* VFs start from 1 in vport */
3460 vf += HCLGE_VF_VPORT_START_NUM;
3461 return &hdev->vport[vf];
3462 }
3463
3464 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3465 struct ifla_vf_info *ivf)
3466 {
3467 struct hclge_vport *vport = hclge_get_vport(handle);
3468 struct hclge_dev *hdev = vport->back;
3469
3470 vport = hclge_get_vf_vport(hdev, vf);
3471 if (!vport)
3472 return -EINVAL;
3473
3474 ivf->vf = vf;
3475 ivf->linkstate = vport->vf_info.link_state;
3476 ivf->spoofchk = vport->vf_info.spoofchk;
3477 ivf->trusted = vport->vf_info.trusted;
3478 ivf->min_tx_rate = 0;
3479 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3480 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3481 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3482 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3483 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3484
3485 return 0;
3486 }
3487
3488 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3489 int link_state)
3490 {
3491 struct hclge_vport *vport = hclge_get_vport(handle);
3492 struct hclge_dev *hdev = vport->back;
3493 int link_state_old;
3494 int ret;
3495
3496 vport = hclge_get_vf_vport(hdev, vf);
3497 if (!vport)
3498 return -EINVAL;
3499
3500 link_state_old = vport->vf_info.link_state;
3501 vport->vf_info.link_state = link_state;
3502
3503 /* return success directly if the VF is not alive; the VF will
3504 * query the link state itself when it starts working.
3505 */
3506 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3507 return 0;
3508
3509 ret = hclge_push_vf_link_status(vport);
3510 if (ret) {
3511 vport->vf_info.link_state = link_state_old;
3512 dev_err(&hdev->pdev->dev,
3513 "failed to push vf%d link status, ret = %d\n", vf, ret);
3514 }
3515
3516 return ret;
3517 }
3518
3519 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3520 {
3521 u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3522
3523 /* fetch the events from their corresponding regs */
3524 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3525 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3526 hw_err_src_reg = hclge_read_dev(&hdev->hw,
3527 HCLGE_RAS_PF_OTHER_INT_STS_REG);
3528
3529 /* Assumption: if by any chance reset and mailbox events are reported
3530 * together, then we will only process the reset event in this go and
3531 * will defer the processing of the mailbox events. Since we would not
3532 * have cleared the RX CMDQ event this time, we would receive another
3533 * interrupt from the hardware just for the mailbox.
3534 *
3535 * check for vector0 reset event sources
3536 */
3537 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3538 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3539 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3540 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3541 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3542 hdev->rst_stats.imp_rst_cnt++;
3543 return HCLGE_VECTOR0_EVENT_RST;
3544 }
3545
3546 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3547 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3548 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3549 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3550 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3551 hdev->rst_stats.global_rst_cnt++;
3552 return HCLGE_VECTOR0_EVENT_RST;
3553 }
3554
3555 /* check for vector0 msix event and hardware error event source */
3556 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3557 hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3558 return HCLGE_VECTOR0_EVENT_ERR;
3559
3560 /* check for vector0 ptp event source */
3561 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3562 *clearval = msix_src_reg;
3563 return HCLGE_VECTOR0_EVENT_PTP;
3564 }
3565
3566 /* check for vector0 mailbox(=CMDQ RX) event source */
3567 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3568 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3569 *clearval = cmdq_src_reg;
3570 return HCLGE_VECTOR0_EVENT_MBX;
3571 }
3572
3573 /* print other vector0 event source */
3574 dev_info(&hdev->pdev->dev,
3575 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3576 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3577
3578 return HCLGE_VECTOR0_EVENT_OTHER;
3579 }
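
/* A condensed sketch, with an assumed helper name, of the priority
 * decode above: sources are tested from highest to lowest and only the
 * first hit is returned, leaving lower-priority causes uncleared so the
 * hardware re-raises them (this is how a coincident mailbox event gets
 * deferred).
 */
#if 0	/* illustrative only, not driver code */
static u32 decode_vector0_event(u32 msix, u32 cmdq, u32 *clearval)
{
	if (msix & BIT(HCLGE_VECTOR0_IMPRESET_INT_B)) {	/* highest */
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}
	if (cmdq & BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B)) {	/* lowest */
		*clearval = cmdq & ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		return HCLGE_VECTOR0_EVENT_MBX;
	}
	return HCLGE_VECTOR0_EVENT_OTHER;
}
#endif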
3580
3581 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3582 u32 regclr)
3583 {
3584 #define HCLGE_IMP_RESET_DELAY 5
3585
3586 switch (event_type) {
3587 case HCLGE_VECTOR0_EVENT_PTP:
3588 case HCLGE_VECTOR0_EVENT_RST:
3589 if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
3590 mdelay(HCLGE_IMP_RESET_DELAY);
3591
3592 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3593 break;
3594 case HCLGE_VECTOR0_EVENT_MBX:
3595 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3596 break;
3597 default:
3598 break;
3599 }
3600 }
3601
3602 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3603 {
3604 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3605 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3606 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3607 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3608 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3609 }
3610
3611 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3612 {
3613 writel(enable ? 1 : 0, vector->addr);
3614 }
3615
3616 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3617 {
3618 struct hclge_dev *hdev = data;
3619 unsigned long flags;
3620 u32 clearval = 0;
3621 u32 event_cause;
3622
3623 hclge_enable_vector(&hdev->misc_vector, false);
3624 event_cause = hclge_check_event_cause(hdev, &clearval);
3625
3626 /* vector 0 interrupt is shared with reset and mailbox source events. */
3627 switch (event_cause) {
3628 case HCLGE_VECTOR0_EVENT_ERR:
3629 hclge_errhand_task_schedule(hdev);
3630 break;
3631 case HCLGE_VECTOR0_EVENT_RST:
3632 hclge_reset_task_schedule(hdev);
3633 break;
3634 case HCLGE_VECTOR0_EVENT_PTP:
3635 spin_lock_irqsave(&hdev->ptp->lock, flags);
3636 hclge_ptp_clean_tx_hwts(hdev);
3637 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3638 break;
3639 case HCLGE_VECTOR0_EVENT_MBX:
3640 /* If we are here, then either:
3641 * 1. we are not handling any mbx task and we are not
3642 * scheduled as well,
3643 * OR
3644 * 2. we are handling an mbx task but nothing more is
3645 * scheduled.
3646 * In both cases, we should schedule the mbx task as there are
3647 * more mbx messages reported by this interrupt.
3648 */
3649 hclge_mbx_task_schedule(hdev);
3650 break;
3651 default:
3652 dev_warn(&hdev->pdev->dev,
3653 "received unknown or unhandled event of vector0\n");
3654 break;
3655 }
3656
3657 hclge_clear_event_cause(hdev, event_cause, clearval);
3658
3659 /* Enable interrupt if it is not caused by reset event or error event */
3660 if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3661 event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3662 event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3663 hclge_enable_vector(&hdev->misc_vector, true);
3664
3665 return IRQ_HANDLED;
3666 }
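
/* Note on the re-enable above: for reset and error events the misc
 * vector is deliberately left masked here; it is re-enabled later by
 * hclge_clear_reset_cause() or hclge_handle_err_reset_request() once
 * the event has been fully handled, so the handler is not re-entered
 * for a cause that is still being processed.
 */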
3667
3668 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3669 {
3670 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3671 dev_warn(&hdev->pdev->dev,
3672 "vector(vector_id %d) has been freed.\n", vector_id);
3673 return;
3674 }
3675
3676 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3677 hdev->num_msi_left += 1;
3678 hdev->num_msi_used -= 1;
3679 }
3680
3681 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3682 {
3683 struct hclge_misc_vector *vector = &hdev->misc_vector;
3684
3685 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3686
3687 vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3688 hdev->vector_status[0] = 0;
3689
3690 hdev->num_msi_left -= 1;
3691 hdev->num_msi_used += 1;
3692 }
3693
3694 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3695 {
3696 int ret;
3697
3698 hclge_get_misc_vector(hdev);
3699
3700 /* this irq is explicitly freed in hclge_misc_irq_uninit() */
3701 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3702 HCLGE_NAME, pci_name(hdev->pdev));
3703 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3704 0, hdev->misc_vector.name, hdev);
3705 if (ret) {
3706 hclge_free_vector(hdev, 0);
3707 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3708 hdev->misc_vector.vector_irq);
3709 }
3710
3711 return ret;
3712 }
3713
3714 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3715 {
3716 free_irq(hdev->misc_vector.vector_irq, hdev);
3717 hclge_free_vector(hdev, 0);
3718 }
3719
3720 int hclge_notify_client(struct hclge_dev *hdev,
3721 enum hnae3_reset_notify_type type)
3722 {
3723 struct hnae3_handle *handle = &hdev->vport[0].nic;
3724 struct hnae3_client *client = hdev->nic_client;
3725 int ret;
3726
3727 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3728 return 0;
3729
3730 if (!client->ops->reset_notify)
3731 return -EOPNOTSUPP;
3732
3733 ret = client->ops->reset_notify(handle, type);
3734 if (ret)
3735 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3736 type, ret);
3737
3738 return ret;
3739 }
3740
3741 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3742 enum hnae3_reset_notify_type type)
3743 {
3744 struct hnae3_handle *handle = &hdev->vport[0].roce;
3745 struct hnae3_client *client = hdev->roce_client;
3746 int ret;
3747
3748 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3749 return 0;
3750
3751 if (!client->ops->reset_notify)
3752 return -EOPNOTSUPP;
3753
3754 ret = client->ops->reset_notify(handle, type);
3755 if (ret)
3756 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3757 type, ret);
3758
3759 return ret;
3760 }
3761
3762 static int hclge_reset_wait(struct hclge_dev *hdev)
3763 {
3764 #define HCLGE_RESET_WAIT_MS 100
3765 #define HCLGE_RESET_WAIT_CNT 350
3766
3767 u32 val, reg, reg_bit;
3768 u32 cnt = 0;
3769
3770 switch (hdev->reset_type) {
3771 case HNAE3_IMP_RESET:
3772 reg = HCLGE_GLOBAL_RESET_REG;
3773 reg_bit = HCLGE_IMP_RESET_BIT;
3774 break;
3775 case HNAE3_GLOBAL_RESET:
3776 reg = HCLGE_GLOBAL_RESET_REG;
3777 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3778 break;
3779 case HNAE3_FUNC_RESET:
3780 reg = HCLGE_FUN_RST_ING;
3781 reg_bit = HCLGE_FUN_RST_ING_B;
3782 break;
3783 default:
3784 dev_err(&hdev->pdev->dev,
3785 "Wait for unsupported reset type: %d\n",
3786 hdev->reset_type);
3787 return -EINVAL;
3788 }
3789
3790 val = hclge_read_dev(&hdev->hw, reg);
3791 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3792 msleep(HCLGE_RESET_WAIT_MS);
3793 val = hclge_read_dev(&hdev->hw, reg);
3794 cnt++;
3795 }
3796
3797 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3798 dev_warn(&hdev->pdev->dev,
3799 "Wait for reset timeout: %d\n", hdev->reset_type);
3800 return -EBUSY;
3801 }
3802
3803 return 0;
3804 }
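
/* Worked timeout for the polling loop above: HCLGE_RESET_WAIT_CNT
 * polls of HCLGE_RESET_WAIT_MS each, i.e. 350 * 100 ms = 35 s before
 * giving up with -EBUSY.
 */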
3805
3806 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3807 {
3808 struct hclge_vf_rst_cmd *req;
3809 struct hclge_desc desc;
3810
3811 req = (struct hclge_vf_rst_cmd *)desc.data;
3812 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3813 req->dest_vfid = func_id;
3814
3815 if (reset)
3816 req->vf_rst = 0x1;
3817
3818 return hclge_cmd_send(&hdev->hw, &desc, 1);
3819 }
3820
3821 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3822 {
3823 int i;
3824
3825 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3826 struct hclge_vport *vport = &hdev->vport[i];
3827 int ret;
3828
3829 /* Send cmd to set/clear VF's FUNC_RST_ING */
3830 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3831 if (ret) {
3832 dev_err(&hdev->pdev->dev,
3833 "set vf(%u) rst failed %d!\n",
3834 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3835 ret);
3836 return ret;
3837 }
3838
3839 if (!reset ||
3840 !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state))
3841 continue;
3842
3843 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) &&
3844 hdev->reset_type == HNAE3_FUNC_RESET) {
3845 set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET,
3846 &vport->need_notify);
3847 continue;
3848 }
3849
3850 /* Inform VF to process the reset.
3851 * hclge_inform_reset_assert_to_vf may fail if VF
3852 * driver is not loaded.
3853 */
3854 ret = hclge_inform_reset_assert_to_vf(vport);
3855 if (ret)
3856 dev_warn(&hdev->pdev->dev,
3857 "inform reset to vf(%u) failed %d!\n",
3858 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3859 ret);
3860 }
3861
3862 return 0;
3863 }
3864
3865 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3866 {
3867 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3868 test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
3869 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3870 return;
3871
3872 if (time_is_before_jiffies(hdev->last_mbx_scheduled +
3873 HCLGE_MBX_SCHED_TIMEOUT))
3874 dev_warn(&hdev->pdev->dev,
3875 "mbx service task is scheduled after %ums on cpu%u!\n",
3876 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
3877 smp_processor_id());
3878
3879 hclge_mbx_handler(hdev);
3880
3881 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3882 }
3883
3884 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3885 {
3886 struct hclge_pf_rst_sync_cmd *req;
3887 struct hclge_desc desc;
3888 int cnt = 0;
3889 int ret;
3890
3891 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3892 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3893
3894 do {
3895 /* the VF needs to bring down its netdev via mbx during PF or FLR reset */
3896 hclge_mailbox_service_task(hdev);
3897
3898 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3899 /* for compatibility with old firmware, wait
3900 * 100 ms for the VF to stop IO
3901 */
3902 if (ret == -EOPNOTSUPP) {
3903 msleep(HCLGE_RESET_SYNC_TIME);
3904 return;
3905 } else if (ret) {
3906 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3907 ret);
3908 return;
3909 } else if (req->all_vf_ready) {
3910 return;
3911 }
3912 msleep(HCLGE_PF_RESET_SYNC_TIME);
3913 hclge_comm_cmd_reuse_desc(&desc, true);
3914 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3915
3916 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3917 }
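
/* Worked bound for the sync loop above: up to HCLGE_PF_RESET_SYNC_CNT
 * iterations with an HCLGE_PF_RESET_SYNC_TIME sleep each, i.e.
 * 1500 * 20 ms = 30 s of waiting for all VFs to report ready before
 * the timeout warning is printed.
 */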
3918
3919 void hclge_report_hw_error(struct hclge_dev *hdev,
3920 enum hnae3_hw_error_type type)
3921 {
3922 struct hnae3_client *client = hdev->nic_client;
3923
3924 if (!client || !client->ops->process_hw_error ||
3925 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3926 return;
3927
3928 client->ops->process_hw_error(&hdev->vport[0].nic, type);
3929 }
3930
3931 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3932 {
3933 u32 reg_val;
3934
3935 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3936 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3937 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3938 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3939 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3940 }
3941
3942 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3943 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3944 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3945 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3946 }
3947 }
3948
3949 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3950 {
3951 struct hclge_desc desc;
3952 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3953 int ret;
3954
3955 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3956 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3957 req->fun_reset_vfid = func_id;
3958
3959 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3960 if (ret)
3961 dev_err(&hdev->pdev->dev,
3962 "send function reset cmd fail, status =%d\n", ret);
3963
3964 return ret;
3965 }
3966
3967 static void hclge_do_reset(struct hclge_dev *hdev)
3968 {
3969 struct hnae3_handle *handle = &hdev->vport[0].nic;
3970 struct pci_dev *pdev = hdev->pdev;
3971 u32 val;
3972
3973 if (hclge_get_hw_reset_stat(handle)) {
3974 dev_info(&pdev->dev, "hardware reset not finish\n");
3975 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3976 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3977 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3978 return;
3979 }
3980
3981 switch (hdev->reset_type) {
3982 case HNAE3_IMP_RESET:
3983 dev_info(&pdev->dev, "IMP reset requested\n");
3984 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3985 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3986 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3987 break;
3988 case HNAE3_GLOBAL_RESET:
3989 dev_info(&pdev->dev, "global reset requested\n");
3990 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3991 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3992 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3993 break;
3994 case HNAE3_FUNC_RESET:
3995 dev_info(&pdev->dev, "PF reset requested\n");
3996 /* schedule again to check later */
3997 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3998 hclge_reset_task_schedule(hdev);
3999 break;
4000 default:
4001 dev_warn(&pdev->dev,
4002 "unsupported reset type: %d\n", hdev->reset_type);
4003 break;
4004 }
4005 }
4006
4007 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
4008 unsigned long *addr)
4009 {
4010 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
4011 struct hclge_dev *hdev = ae_dev->priv;
4012
4013 /* return the highest priority reset level amongst all */
4014 if (test_bit(HNAE3_IMP_RESET, addr)) {
4015 rst_level = HNAE3_IMP_RESET;
4016 clear_bit(HNAE3_IMP_RESET, addr);
4017 clear_bit(HNAE3_GLOBAL_RESET, addr);
4018 clear_bit(HNAE3_FUNC_RESET, addr);
4019 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
4020 rst_level = HNAE3_GLOBAL_RESET;
4021 clear_bit(HNAE3_GLOBAL_RESET, addr);
4022 clear_bit(HNAE3_FUNC_RESET, addr);
4023 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
4024 rst_level = HNAE3_FUNC_RESET;
4025 clear_bit(HNAE3_FUNC_RESET, addr);
4026 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
4027 rst_level = HNAE3_FLR_RESET;
4028 clear_bit(HNAE3_FLR_RESET, addr);
4029 }
4030
4031 if (hdev->reset_type != HNAE3_NONE_RESET &&
4032 rst_level < hdev->reset_type)
4033 return HNAE3_NONE_RESET;
4034
4035 return rst_level;
4036 }
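
/* Worked example for the level selection above: with both
 * HNAE3_GLOBAL_RESET and HNAE3_FUNC_RESET pending, global reset is
 * returned and the function reset bit is cleared as well, since a
 * higher-level reset implicitly covers every lower one
 * (IMP > global > func > FLR).
 */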
4037
4038 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
4039 {
4040 u32 clearval = 0;
4041
4042 switch (hdev->reset_type) {
4043 case HNAE3_IMP_RESET:
4044 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
4045 break;
4046 case HNAE3_GLOBAL_RESET:
4047 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
4048 break;
4049 default:
4050 break;
4051 }
4052
4053 if (!clearval)
4054 return;
4055
4056 /* For revision 0x20, the reset interrupt source
4057 * can only be cleared after the hardware reset is done
4058 */
4059 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4060 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
4061 clearval);
4062
4063 hclge_enable_vector(&hdev->misc_vector, true);
4064 }
4065
4066 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
4067 {
4068 u32 reg_val;
4069
4070 reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
4071 if (enable)
4072 reg_val |= HCLGE_COMM_NIC_SW_RST_RDY;
4073 else
4074 reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY;
4075
4076 hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
4077 }
4078
4079 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
4080 {
4081 int ret;
4082
4083 ret = hclge_set_all_vf_rst(hdev, true);
4084 if (ret)
4085 return ret;
4086
4087 hclge_func_reset_sync_vf(hdev);
4088
4089 return 0;
4090 }
4091
4092 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
4093 {
4094 u32 reg_val;
4095 int ret = 0;
4096
4097 switch (hdev->reset_type) {
4098 case HNAE3_FUNC_RESET:
4099 ret = hclge_func_reset_notify_vf(hdev);
4100 if (ret)
4101 return ret;
4102
4103 ret = hclge_func_reset_cmd(hdev, 0);
4104 if (ret) {
4105 dev_err(&hdev->pdev->dev,
4106 "asserting function reset fail %d!\n", ret);
4107 return ret;
4108 }
4109
4110 /* After performing PF reset, it is not necessary to do the
4111 * mailbox handling or send any command to firmware, because
4112 * any mailbox handling or command to firmware is only valid
4113 * after hclge_comm_cmd_init is called.
4114 */
4115 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
4116 hdev->rst_stats.pf_rst_cnt++;
4117 break;
4118 case HNAE3_FLR_RESET:
4119 ret = hclge_func_reset_notify_vf(hdev);
4120 if (ret)
4121 return ret;
4122 break;
4123 case HNAE3_IMP_RESET:
4124 hclge_handle_imp_error(hdev);
4125 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
4126 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
4127 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
4128 break;
4129 default:
4130 break;
4131 }
4132
4133 /* inform hardware that preparatory work is done */
4134 msleep(HCLGE_RESET_SYNC_TIME);
4135 hclge_reset_handshake(hdev, true);
4136 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
4137
4138 return ret;
4139 }
4140
4141 static void hclge_show_rst_info(struct hclge_dev *hdev)
4142 {
4143 char *buf;
4144
4145 buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
4146 if (!buf)
4147 return;
4148
4149 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
4150
4151 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
4152
4153 kfree(buf);
4154 }
4155
4156 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
4157 {
4158 #define MAX_RESET_FAIL_CNT 5
4159
4160 if (hdev->reset_pending) {
4161 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
4162 hdev->reset_pending);
4163 return true;
4164 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
4165 HCLGE_RESET_INT_M) {
4166 dev_info(&hdev->pdev->dev,
4167 "reset failed because new reset interrupt\n");
4168 hclge_clear_reset_cause(hdev);
4169 return false;
4170 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
4171 hdev->rst_stats.reset_fail_cnt++;
4172 set_bit(hdev->reset_type, &hdev->reset_pending);
4173 dev_info(&hdev->pdev->dev,
4174 "re-schedule reset task(%u)\n",
4175 hdev->rst_stats.reset_fail_cnt);
4176 return true;
4177 }
4178
4179 hclge_clear_reset_cause(hdev);
4180
4181 /* recover the handshake status when the reset fails */
4182 hclge_reset_handshake(hdev, true);
4183
4184 dev_err(&hdev->pdev->dev, "Reset fail!\n");
4185
4186 hclge_show_rst_info(hdev);
4187
4188 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4189
4190 return false;
4191 }
4192
4193 static void hclge_update_reset_level(struct hclge_dev *hdev)
4194 {
4195 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4196 enum hnae3_reset_type reset_level;
4197
4198 /* a reset request will not be set during reset, so clear the
4199 * pending reset request to avoid an unnecessary reset caused by
4200 * the same reason.
4201 */
4202 hclge_get_reset_level(ae_dev, &hdev->reset_request);
4203
4204 /* if default_reset_request has a higher level reset request,
4205 * it should be handled as soon as possible, since some errors
4206 * need this kind of reset to be fixed.
4207 */
4208 reset_level = hclge_get_reset_level(ae_dev,
4209 &hdev->default_reset_request);
4210 if (reset_level != HNAE3_NONE_RESET)
4211 set_bit(reset_level, &hdev->reset_request);
4212 }
4213
4214 static int hclge_set_rst_done(struct hclge_dev *hdev)
4215 {
4216 struct hclge_pf_rst_done_cmd *req;
4217 struct hclge_desc desc;
4218 int ret;
4219
4220 req = (struct hclge_pf_rst_done_cmd *)desc.data;
4221 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4222 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4223
4224 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4225 /* To be compatible with the old firmware, which does not support
4226 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4227 * return success
4228 */
4229 if (ret == -EOPNOTSUPP) {
4230 dev_warn(&hdev->pdev->dev,
4231 "current firmware does not support command(0x%x)!\n",
4232 HCLGE_OPC_PF_RST_DONE);
4233 return 0;
4234 } else if (ret) {
4235 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4236 ret);
4237 }
4238
4239 return ret;
4240 }
4241
4242 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4243 {
4244 int ret = 0;
4245
4246 switch (hdev->reset_type) {
4247 case HNAE3_FUNC_RESET:
4248 case HNAE3_FLR_RESET:
4249 ret = hclge_set_all_vf_rst(hdev, false);
4250 break;
4251 case HNAE3_GLOBAL_RESET:
4252 case HNAE3_IMP_RESET:
4253 ret = hclge_set_rst_done(hdev);
4254 break;
4255 default:
4256 break;
4257 }
4258
4259 /* clear up the handshake status after re-initialization is done */
4260 hclge_reset_handshake(hdev, false);
4261
4262 return ret;
4263 }
4264
4265 static int hclge_reset_stack(struct hclge_dev *hdev)
4266 {
4267 int ret;
4268
4269 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4270 if (ret)
4271 return ret;
4272
4273 ret = hclge_reset_ae_dev(hdev->ae_dev);
4274 if (ret)
4275 return ret;
4276
4277 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4278 }
4279
4280 static int hclge_reset_prepare(struct hclge_dev *hdev)
4281 {
4282 int ret;
4283
4284 hdev->rst_stats.reset_cnt++;
4285 /* perform reset of the stack & ae device for a client */
4286 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4287 if (ret)
4288 return ret;
4289
4290 rtnl_lock();
4291 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4292 rtnl_unlock();
4293 if (ret)
4294 return ret;
4295
4296 return hclge_reset_prepare_wait(hdev);
4297 }
4298
4299 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4300 {
4301 int ret;
4302
4303 hdev->rst_stats.hw_reset_done_cnt++;
4304
4305 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4306 if (ret)
4307 return ret;
4308
4309 rtnl_lock();
4310 ret = hclge_reset_stack(hdev);
4311 rtnl_unlock();
4312 if (ret)
4313 return ret;
4314
4315 hclge_clear_reset_cause(hdev);
4316
4317 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4318 /* ignore the RoCE notify error if the reset has already failed
4319 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
4320 */
4321 if (ret &&
4322 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4323 return ret;
4324
4325 ret = hclge_reset_prepare_up(hdev);
4326 if (ret)
4327 return ret;
4328
4329 rtnl_lock();
4330 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4331 rtnl_unlock();
4332 if (ret)
4333 return ret;
4334
4335 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4336 if (ret)
4337 return ret;
4338
4339 hdev->last_reset_time = jiffies;
4340 hdev->rst_stats.reset_fail_cnt = 0;
4341 hdev->rst_stats.reset_done_cnt++;
4342 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4343
4344 hclge_update_reset_level(hdev);
4345
4346 return 0;
4347 }
4348
4349 static void hclge_reset(struct hclge_dev *hdev)
4350 {
4351 if (hclge_reset_prepare(hdev))
4352 goto err_reset;
4353
4354 if (hclge_reset_wait(hdev))
4355 goto err_reset;
4356
4357 if (hclge_reset_rebuild(hdev))
4358 goto err_reset;
4359
4360 return;
4361
4362 err_reset:
4363 if (hclge_reset_err_handle(hdev))
4364 hclge_reset_task_schedule(hdev);
4365 }
4366
4367 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4368 {
4369 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4370 struct hclge_dev *hdev = ae_dev->priv;
4371
4372 /* We might end up getting called broadly because of the 2 cases below:
4373 * 1. A recoverable error was conveyed through APEI and the only way to
4374 * bring normalcy is to reset.
4375 * 2. A new reset request from the stack due to timeout
4376 *
4377 * check if this is a new reset request and we are not here just because
4378 * the last reset attempt did not succeed and the watchdog hit us again.
4379 * We will know this if the last reset request did not occur very
4380 * recently (watchdog timer = 5*HZ, let us check after a sufficiently
4381 * large time, say 4*5*HZ). In case of a new request we reset the
4382 * "reset level" to PF reset. And if it is a repeat reset request of
4383 * the most recent one, then we want to make sure we throttle the reset
4384 * request. Therefore, we will not allow it again before 3*HZ elapses.
4385 */
4386
4387 if (time_before(jiffies, (hdev->last_reset_time +
4388 HCLGE_RESET_INTERVAL))) {
4389 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4390 return;
4391 }
4392
4393 if (hdev->default_reset_request) {
4394 hdev->reset_level =
4395 hclge_get_reset_level(ae_dev,
4396 &hdev->default_reset_request);
4397 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4398 hdev->reset_level = HNAE3_FUNC_RESET;
4399 }
4400
4401 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4402 hdev->reset_level);
4403
4404 /* request reset & schedule reset task */
4405 set_bit(hdev->reset_level, &hdev->reset_request);
4406 hclge_reset_task_schedule(hdev);
4407
4408 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4409 hdev->reset_level++;
4410 }
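
/* Worked example of the throttling above, assuming HCLGE_RESET_INTERVAL
 * is 3 * HZ: a second event arriving within 3 s of last_reset_time only
 * re-arms reset_timer, while one arriving more than 20 s (4 * 5 * HZ)
 * later downgrades reset_level back to HNAE3_FUNC_RESET before
 * scheduling the reset task.
 */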
4411
4412 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4413 enum hnae3_reset_type rst_type)
4414 {
4415 struct hclge_dev *hdev = ae_dev->priv;
4416
4417 set_bit(rst_type, &hdev->default_reset_request);
4418 }
4419
4420 static void hclge_reset_timer(struct timer_list *t)
4421 {
4422 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4423
4424 /* if default_reset_request has no value, it means that this reset
4425 * request has already been handled, so just return here
4426 */
4427 if (!hdev->default_reset_request)
4428 return;
4429
4430 dev_info(&hdev->pdev->dev,
4431 "triggering reset in reset timer\n");
4432 hclge_reset_event(hdev->pdev, NULL);
4433 }
4434
4435 static void hclge_reset_subtask(struct hclge_dev *hdev)
4436 {
4437 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4438
4439 /* check if there is any ongoing reset in the hardware. This status can
4440 * be checked from reset_pending. If there is, then we need to wait for
4441 * the hardware to complete the reset.
4442 * a. If we are able to figure out in reasonable time that the hardware
4443 * has fully reset, then we can proceed with the driver and client
4444 * reset.
4445 * b. else, we can come back later to check this status, so re-schedule
4446 * now.
4447 */
4448 hdev->last_reset_time = jiffies;
4449 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4450 if (hdev->reset_type != HNAE3_NONE_RESET)
4451 hclge_reset(hdev);
4452
4453 /* check if we got any *new* reset requests to be honored */
4454 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4455 if (hdev->reset_type != HNAE3_NONE_RESET)
4456 hclge_do_reset(hdev);
4457
4458 hdev->reset_type = HNAE3_NONE_RESET;
4459 }
4460
4461 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4462 {
4463 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4464 enum hnae3_reset_type reset_type;
4465
4466 if (ae_dev->hw_err_reset_req) {
4467 reset_type = hclge_get_reset_level(ae_dev,
4468 &ae_dev->hw_err_reset_req);
4469 hclge_set_def_reset_request(ae_dev, reset_type);
4470 }
4471
4472 if (hdev->default_reset_request && ae_dev->ops->reset_event)
4473 ae_dev->ops->reset_event(hdev->pdev, NULL);
4474
4475 /* enable interrupt after error handling is complete */
4476 hclge_enable_vector(&hdev->misc_vector, true);
4477 }
4478
4479 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4480 {
4481 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4482
4483 ae_dev->hw_err_reset_req = 0;
4484
4485 if (hclge_find_error_source(hdev)) {
4486 hclge_handle_error_info_log(ae_dev);
4487 hclge_handle_mac_tnl(hdev);
4488 }
4489
4490 hclge_handle_err_reset_request(hdev);
4491 }
4492
4493 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4494 {
4495 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4496 struct device *dev = &hdev->pdev->dev;
4497 u32 msix_sts_reg;
4498
4499 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4500 if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4501 if (hclge_handle_hw_msix_error
4502 (hdev, &hdev->default_reset_request))
4503 dev_info(dev, "received msix interrupt 0x%x\n",
4504 msix_sts_reg);
4505 }
4506
4507 hclge_handle_hw_ras_error(ae_dev);
4508
4509 hclge_handle_err_reset_request(hdev);
4510 }
4511
4512 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4513 {
4514 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4515 return;
4516
4517 if (hnae3_dev_ras_imp_supported(hdev))
4518 hclge_handle_err_recovery(hdev);
4519 else
4520 hclge_misc_err_recovery(hdev);
4521 }
4522
4523 static void hclge_reset_service_task(struct hclge_dev *hdev)
4524 {
4525 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4526 return;
4527
4528 if (time_is_before_jiffies(hdev->last_rst_scheduled +
4529 HCLGE_RESET_SCHED_TIMEOUT))
4530 dev_warn(&hdev->pdev->dev,
4531 "reset service task is scheduled after %ums on cpu%u!\n",
4532 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
4533 smp_processor_id());
4534
4535 down(&hdev->reset_sem);
4536 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4537
4538 hclge_reset_subtask(hdev);
4539
4540 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4541 up(&hdev->reset_sem);
4542 }
4543
4544 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4545 {
4546 #define HCLGE_ALIVE_SECONDS_NORMAL 8
4547
4548 unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ;
4549 int i;
4550
4551 /* start from vport 1, since the PF is always alive */
4552 for (i = 1; i < hdev->num_alloc_vport; i++) {
4553 struct hclge_vport *vport = &hdev->vport[i];
4554
4555 if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) ||
4556 !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4557 continue;
4558 if (time_after(jiffies, vport->last_active_jiffies +
4559 alive_time)) {
4560 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4561 dev_warn(&hdev->pdev->dev,
4562 "VF %u heartbeat timeout\n",
4563 i - HCLGE_VF_VPORT_START_NUM);
4564 }
4565 }
4566 }
4567
4568 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4569 {
4570 unsigned long delta = round_jiffies_relative(HZ);
4571
4572 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4573 return;
4574
4575 /* Always handle the link updating to make sure link state is
4576 * updated when it is triggered by mbx.
4577 */
4578 hclge_update_link_status(hdev);
4579 hclge_sync_mac_table(hdev);
4580 hclge_sync_promisc_mode(hdev);
4581 hclge_sync_fd_table(hdev);
4582
4583 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4584 delta = jiffies - hdev->last_serv_processed;
4585
4586 if (delta < round_jiffies_relative(HZ)) {
4587 delta = round_jiffies_relative(HZ) - delta;
4588 goto out;
4589 }
4590 }
4591
4592 hdev->serv_processed_cnt++;
4593 hclge_update_vport_alive(hdev);
4594
4595 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4596 hdev->last_serv_processed = jiffies;
4597 goto out;
4598 }
4599
4600 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4601 hclge_update_stats_for_all(hdev);
4602
4603 hclge_update_port_info(hdev);
4604 hclge_sync_vlan_filter(hdev);
4605
4606 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4607 hclge_rfs_filter_expire(hdev);
4608
4609 hdev->last_serv_processed = jiffies;
4610
4611 out:
4612 hclge_task_schedule(hdev, delta);
4613 }
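
/* Sketch of the cadence math above: if the previous pass ran 300 ms ago
 * (delta = jiffies - last_serv_processed < HZ), the task is re-armed
 * with round_jiffies_relative(HZ) - delta, roughly 700 ms, so the
 * periodic work keeps an approximately 1 s period instead of drifting
 * earlier on every premature wakeup.
 */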
4614
4615 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4616 {
4617 unsigned long flags;
4618
4619 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4620 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4621 !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4622 return;
4623
4624 /* to prevent a race with the irq handler */
4625 spin_lock_irqsave(&hdev->ptp->lock, flags);
4626
4627 /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4628 * handler may handle it just before spin_lock_irqsave().
4629 */
4630 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4631 hclge_ptp_clean_tx_hwts(hdev);
4632
4633 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4634 }
4635
4636 static void hclge_service_task(struct work_struct *work)
4637 {
4638 struct hclge_dev *hdev =
4639 container_of(work, struct hclge_dev, service_task.work);
4640
4641 hclge_errhand_service_task(hdev);
4642 hclge_reset_service_task(hdev);
4643 hclge_ptp_service_task(hdev);
4644 hclge_mailbox_service_task(hdev);
4645 hclge_periodic_service_task(hdev);
4646
4647 /* Handle error recovery, reset and mbx again in case periodical task
4648 * delays the handling by calling hclge_task_schedule() in
4649 * hclge_periodic_service_task().
4650 */
4651 hclge_errhand_service_task(hdev);
4652 hclge_reset_service_task(hdev);
4653 hclge_mailbox_service_task(hdev);
4654 }
4655
4656 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4657 {
4658 /* VF handle has no client */
4659 if (!handle->client)
4660 return container_of(handle, struct hclge_vport, nic);
4661 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4662 return container_of(handle, struct hclge_vport, roce);
4663 else
4664 return container_of(handle, struct hclge_vport, nic);
4665 }
4666
4667 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4668 struct hnae3_vector_info *vector_info)
4669 {
4670 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4671
4672 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4673
4674 /* need an extended offset to configure vectors >= 64 */
4675 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4676 vector_info->io_addr = hdev->hw.hw.io_base +
4677 HCLGE_VECTOR_REG_BASE +
4678 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4679 else
4680 vector_info->io_addr = hdev->hw.hw.io_base +
4681 HCLGE_VECTOR_EXT_REG_BASE +
4682 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4683 HCLGE_VECTOR_REG_OFFSET_H +
4684 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4685 HCLGE_VECTOR_REG_OFFSET;
4686
4687 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4688 hdev->vector_irq[idx] = vector_info->vector;
4689 }
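
/* Worked example of the vector addressing above, using
 * HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 = 64 as defined here: idx 3 lands in
 * the base window at HCLGE_VECTOR_REG_BASE + 2 * HCLGE_VECTOR_REG_OFFSET,
 * while idx 70 uses the extended window at HCLGE_VECTOR_EXT_REG_BASE +
 * 1 * HCLGE_VECTOR_REG_OFFSET_H + 5 * HCLGE_VECTOR_REG_OFFSET, since
 * (70 - 1) / 64 = 1 and (70 - 1) % 64 = 5.
 */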
4690
4691 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4692 struct hnae3_vector_info *vector_info)
4693 {
4694 struct hclge_vport *vport = hclge_get_vport(handle);
4695 struct hnae3_vector_info *vector = vector_info;
4696 struct hclge_dev *hdev = vport->back;
4697 int alloc = 0;
4698 u16 i = 0;
4699 u16 j;
4700
4701 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4702 vector_num = min(hdev->num_msi_left, vector_num);
4703
4704 for (j = 0; j < vector_num; j++) {
4705 while (++i < hdev->num_nic_msi) {
4706 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4707 hclge_get_vector_info(hdev, i, vector);
4708 vector++;
4709 alloc++;
4710
4711 break;
4712 }
4713 }
4714 }
4715 hdev->num_msi_left -= alloc;
4716 hdev->num_msi_used += alloc;
4717
4718 return alloc;
4719 }
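
/* Note on the bounds above: vector 0 is reserved for the misc
 * interrupt, so at most num_nic_msi - 1 vectors are handed out, and
 * the scan starts at index 1 because of the pre-increment before the
 * first status test.
 */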
4720
4721 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4722 {
4723 int i;
4724
4725 for (i = 0; i < hdev->num_msi; i++)
4726 if (vector == hdev->vector_irq[i])
4727 return i;
4728
4729 return -EINVAL;
4730 }
4731
4732 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4733 {
4734 struct hclge_vport *vport = hclge_get_vport(handle);
4735 struct hclge_dev *hdev = vport->back;
4736 int vector_id;
4737
4738 vector_id = hclge_get_vector_index(hdev, vector);
4739 if (vector_id < 0) {
4740 dev_err(&hdev->pdev->dev,
4741 "Get vector index fail. vector = %d\n", vector);
4742 return vector_id;
4743 }
4744
4745 hclge_free_vector(hdev, vector_id);
4746
4747 return 0;
4748 }
4749
4750 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4751 u8 *key, u8 *hfunc)
4752 {
4753 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4754 struct hclge_vport *vport = hclge_get_vport(handle);
4755 struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;
4756
4757 hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
4758
4759 hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
4760 ae_dev->dev_specs.rss_ind_tbl_size);
4761
4762 return 0;
4763 }
4764
4765 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4766 const u8 *key, const u8 hfunc)
4767 {
4768 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4769 struct hclge_vport *vport = hclge_get_vport(handle);
4770 struct hclge_dev *hdev = vport->back;
4771 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
4772 int ret, i;
4773
4774 ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
4775 if (ret) {
4776 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4777 return ret;
4778 }
4779
4780 /* Update the shadow RSS table with user specified qids */
4781 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4782 rss_cfg->rss_indirection_tbl[i] = indir[i];
4783
4784 /* Update the hardware */
4785 return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
4786 rss_cfg->rss_indirection_tbl);
4787 }
4788
4789 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4790 struct ethtool_rxnfc *nfc)
4791 {
4792 struct hclge_vport *vport = hclge_get_vport(handle);
4793 struct hclge_dev *hdev = vport->back;
4794 int ret;
4795
4796 ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
4797 &hdev->rss_cfg, nfc);
4798 if (ret) {
4799 dev_err(&hdev->pdev->dev,
4800 "failed to set rss tuple, ret = %d.\n", ret);
4801 return ret;
4802 }
4803
4804 return 0;
4805 }
4806
4807 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4808 struct ethtool_rxnfc *nfc)
4809 {
4810 struct hclge_vport *vport = hclge_get_vport(handle);
4811 u8 tuple_sets;
4812 int ret;
4813
4814 nfc->data = 0;
4815
4816 ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
4817 &tuple_sets);
4818 if (ret || !tuple_sets)
4819 return ret;
4820
4821 nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
4822
4823 return 0;
4824 }
4825
4826 static int hclge_get_tc_size(struct hnae3_handle *handle)
4827 {
4828 struct hclge_vport *vport = hclge_get_vport(handle);
4829 struct hclge_dev *hdev = vport->back;
4830
4831 return hdev->pf_rss_size_max;
4832 }
4833
4834 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4835 {
4836 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4837 struct hclge_vport *vport = hdev->vport;
4838 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4839 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4840 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4841 struct hnae3_tc_info *tc_info;
4842 u16 roundup_size;
4843 u16 rss_size;
4844 int i;
4845
4846 tc_info = &vport->nic.kinfo.tc_info;
4847 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4848 rss_size = tc_info->tqp_count[i];
4849 tc_valid[i] = 0;
4850
4851 if (!(hdev->hw_tc_map & BIT(i)))
4852 continue;
4853
4854 /* tc_size set to hardware is the log2 of the roundup power of two
4855 * of rss_size; the actual queue size is limited by the indirection
4856 * table.
4857 */
4858 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4859 rss_size == 0) {
4860 dev_err(&hdev->pdev->dev,
4861 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4862 rss_size);
4863 return -EINVAL;
4864 }
4865
4866 roundup_size = roundup_pow_of_two(rss_size);
4867 roundup_size = ilog2(roundup_size);
4868
4869 tc_valid[i] = 1;
4870 tc_size[i] = roundup_size;
4871 tc_offset[i] = tc_info->tqp_offset[i];
4872 }
4873
4874 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
4875 tc_size);
4876 }
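
/* Worked example for the tc_size computation above: rss_size = 24
 * gives roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so the hardware
 * is programmed with tc_size = 5, i.e. 2^5 = 32 queue slots of which
 * 24 are actually used.
 */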
4877
4878 int hclge_rss_init_hw(struct hclge_dev *hdev)
4879 {
4880 u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
4881 u8 *key = hdev->rss_cfg.rss_hash_key;
4882 u8 hfunc = hdev->rss_cfg.rss_algo;
4883 int ret;
4884
4885 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
4886 rss_indir);
4887 if (ret)
4888 return ret;
4889
4890 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
4891 if (ret)
4892 return ret;
4893
4894 ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg);
4895 if (ret)
4896 return ret;
4897
4898 return hclge_init_rss_tc_mode(hdev);
4899 }
4900
4901 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4902 int vector_id, bool en,
4903 struct hnae3_ring_chain_node *ring_chain)
4904 {
4905 struct hclge_dev *hdev = vport->back;
4906 struct hnae3_ring_chain_node *node;
4907 struct hclge_desc desc;
4908 struct hclge_ctrl_vector_chain_cmd *req =
4909 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4910 enum hclge_comm_cmd_status status;
4911 enum hclge_opcode_type op;
4912 u16 tqp_type_and_id;
4913 int i;
4914
4915 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4916 hclge_cmd_setup_basic_desc(&desc, op, false);
4917 req->int_vector_id_l = hnae3_get_field(vector_id,
4918 HCLGE_VECTOR_ID_L_M,
4919 HCLGE_VECTOR_ID_L_S);
4920 req->int_vector_id_h = hnae3_get_field(vector_id,
4921 HCLGE_VECTOR_ID_H_M,
4922 HCLGE_VECTOR_ID_H_S);
4923
4924 i = 0;
4925 for (node = ring_chain; node; node = node->next) {
4926 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4927 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4928 HCLGE_INT_TYPE_S,
4929 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4930 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4931 HCLGE_TQP_ID_S, node->tqp_index);
4932 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4933 HCLGE_INT_GL_IDX_S,
4934 hnae3_get_field(node->int_gl_idx,
4935 HNAE3_RING_GL_IDX_M,
4936 HNAE3_RING_GL_IDX_S));
4937 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4938 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4939 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4940 req->vfid = vport->vport_id;
4941
4942 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4943 if (status) {
4944 dev_err(&hdev->pdev->dev,
4945 "Map TQP fail, status is %d.\n",
4946 status);
4947 return -EIO;
4948 }
4949 i = 0;
4950
4951 hclge_cmd_setup_basic_desc(&desc,
4952 op,
4953 false);
4954 req->int_vector_id_l =
4955 hnae3_get_field(vector_id,
4956 HCLGE_VECTOR_ID_L_M,
4957 HCLGE_VECTOR_ID_L_S);
4958 req->int_vector_id_h =
4959 hnae3_get_field(vector_id,
4960 HCLGE_VECTOR_ID_H_M,
4961 HCLGE_VECTOR_ID_H_S);
4962 }
4963 }
4964
4965 if (i > 0) {
4966 req->int_cause_num = i;
4967 req->vfid = vport->vport_id;
4968 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4969 if (status) {
4970 dev_err(&hdev->pdev->dev,
4971 "Map TQP fail, status is %d.\n", status);
4972 return -EIO;
4973 }
4974 }
4975
4976 return 0;
4977 }
4978
4979 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4980 struct hnae3_ring_chain_node *ring_chain)
4981 {
4982 struct hclge_vport *vport = hclge_get_vport(handle);
4983 struct hclge_dev *hdev = vport->back;
4984 int vector_id;
4985
4986 vector_id = hclge_get_vector_index(hdev, vector);
4987 if (vector_id < 0) {
4988 dev_err(&hdev->pdev->dev,
4989 "failed to get vector index. vector=%d\n", vector);
4990 return vector_id;
4991 }
4992
4993 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4994 }
4995
4996 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4997 struct hnae3_ring_chain_node *ring_chain)
4998 {
4999 struct hclge_vport *vport = hclge_get_vport(handle);
5000 struct hclge_dev *hdev = vport->back;
5001 int vector_id, ret;
5002
5003 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5004 return 0;
5005
5006 vector_id = hclge_get_vector_index(hdev, vector);
5007 if (vector_id < 0) {
5008 dev_err(&handle->pdev->dev,
5009 "Get vector index fail. ret =%d\n", vector_id);
5010 return vector_id;
5011 }
5012
5013 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5014 if (ret)
5015 dev_err(&handle->pdev->dev,
5016 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5017 vector_id, ret);
5018
5019 return ret;
5020 }
5021
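/* Configure unicast/multicast/broadcast promiscuous mode for one vport.
 * The extend_promisc field carries the per-direction RX/TX enable bits;
 * the promisc field keeps the legacy layout for DEVICE_VERSION_V1/2.
 */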
static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
				      bool en_uc, bool en_mc, bool en_bc)
{
	struct hclge_vport *vport = &hdev->vport[vf_id];
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	bool uc_tx_en = en_uc;
	u8 promisc_cfg = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = vf_id;

	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
		uc_tx_en = false;

	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
	req->extend_promisc = promisc_cfg;

	/* to be compatible with DEVICE_VERSION_V1/2 */
	promisc_cfg = 0;
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
	req->promisc = promisc_cfg;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set vport %u promisc mode, ret = %d.\n",
			vf_id, ret);

	return ret;
}

int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
				 bool en_mc_pmc, bool en_bc_pmc)
{
	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
}

static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool en_bc_pmc = true;

	/* For devices whose version is below V2, the vlan filter is always
	 * bypassed when broadcast promiscuous mode is enabled. So broadcast
	 * promiscuous mode should stay disabled until the user enables
	 * promiscuous mode.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;

	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
}

static void hclge_sync_fd_state(struct hclge_dev *hdev)
{
	if (hlist_empty(&hdev->fd_rule_list))
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}

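/* fd_bmap tracks which rule locations are in use. Only flip the bit and
 * adjust hclge_fd_rule_num when the state actually changes, so repeated
 * add or delete requests for the same location stay idempotent.
 */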
static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
{
	if (!test_bit(location, hdev->fd_bmap)) {
		set_bit(location, hdev->fd_bmap);
		hdev->hclge_fd_rule_num++;
	}
}

static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
{
	if (test_bit(location, hdev->fd_bmap)) {
		clear_bit(location, hdev->fd_bmap);
		hdev->hclge_fd_rule_num--;
	}
}

static void hclge_fd_free_node(struct hclge_dev *hdev,
			       struct hclge_fd_rule *rule)
{
	hlist_del(&rule->rule_node);
	kfree(rule);
	hclge_sync_fd_state(hdev);
}

static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
				      struct hclge_fd_rule *old_rule,
				      struct hclge_fd_rule *new_rule,
				      enum HCLGE_FD_NODE_STATE state)
{
	switch (state) {
	case HCLGE_FD_TO_ADD:
	case HCLGE_FD_ACTIVE:
		/* 1) if the new state is TO_ADD, just replace the old rule
		 * with the same location, no matter its state, because the
		 * new rule will be configured to the hardware.
		 * 2) if the new state is ACTIVE, it means the new rule
		 * has been configured to the hardware, so just replace
		 * the old rule node with the same location.
		 * 3) neither case adds a new node to the list, so it's
		 * unnecessary to update the rule number and fd_bmap.
		 */
		new_rule->rule_node.next = old_rule->rule_node.next;
		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
		memcpy(old_rule, new_rule, sizeof(*old_rule));
		kfree(new_rule);
		break;
	case HCLGE_FD_DELETED:
		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
		hclge_fd_free_node(hdev, old_rule);
		break;
	case HCLGE_FD_TO_DEL:
		/* if the new request is TO_DEL and the old rule exists:
		 * 1) the state of the old rule is TO_DEL, we need do nothing,
		 * because we delete the rule by location, the other rule
		 * content is unnecessary.
		 * 2) the state of the old rule is ACTIVE, we need to change
		 * its state to TO_DEL, so the rule will be deleted when the
		 * periodic task is scheduled.
		 * 3) the state of the old rule is TO_ADD, it means the rule
		 * hasn't been added to hardware, so we just delete the rule
		 * node from fd_rule_list directly.
		 */
		if (old_rule->state == HCLGE_FD_TO_ADD) {
			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
			hclge_fd_free_node(hdev, old_rule);
			return;
		}
		old_rule->state = HCLGE_FD_TO_DEL;
		break;
	}
}

static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
						u16 location,
						struct hclge_fd_rule **parent)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
		if (rule->location == location)
			return rule;
		else if (rule->location > location)
			return NULL;
		/* record the parent node, used to keep the nodes in
		 * fd_rule_list in ascending order.
		 */
		*parent = rule;
	}

	return NULL;
}

/* insert fd rule node in ascending order according to rule->location */
static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
				      struct hclge_fd_rule *rule,
				      struct hclge_fd_rule *parent)
{
	INIT_HLIST_NODE(&rule->rule_node);

	if (parent)
		hlist_add_behind(&rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&rule->rule_node, hlist);
}

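/* Write the three user-def offset configurations (OL2/OL3/OL4) to hardware
 * in one command. A layer's user-def matching is enabled only while at
 * least one rule references it (ref_cnt > 0).
 */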
static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
				     struct hclge_fd_user_def_cfg *cfg)
{
	struct hclge_fd_user_def_cfg_cmd *req;
	struct hclge_desc desc;
	u16 data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);

	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;

	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
	req->ol2_cfg = cpu_to_le16(data);

	data = 0;
	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
	req->ol3_cfg = cpu_to_le16(data);

	data = 0;
	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
	req->ol4_cfg = cpu_to_le16(data);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set fd user def data, ret = %d\n", ret);
	return ret;
}

static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
{
	int ret;

	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
		return;

	if (!locked)
		spin_lock_bh(&hdev->fd_rule_lock);

	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
	if (ret)
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);

	if (!locked)
		spin_unlock_bh(&hdev->fd_rule_lock);
}

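/* Each layer supports a single user-def offset. Reject a rule whose offset
 * conflicts with the offset already referenced on that layer, unless the
 * only existing reference comes from the rule being replaced at the same
 * location and layer.
 */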
static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
					  struct hclge_fd_rule *rule)
{
	struct hlist_head *hlist = &hdev->fd_rule_list;
	struct hclge_fd_rule *fd_rule, *parent = NULL;
	struct hclge_fd_user_def_info *info, *old_info;
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return 0;

	/* valid layers start from 1, so subtract 1 to get the cfg */
	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	info = &rule->ep.user_def;

	if (!cfg->ref_cnt || cfg->offset == info->offset)
		return 0;

	if (cfg->ref_cnt > 1)
		goto error;

	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
	if (fd_rule) {
		old_info = &fd_rule->ep.user_def;
		if (info->layer == old_info->layer)
			return 0;
	}

error:
	dev_err(&hdev->pdev->dev,
		"No available offset for layer%d fd rule, each layer only supports one user def offset.\n",
		info->layer + 1);
	return -ENOSPC;
}

static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
					 struct hclge_fd_rule *rule)
{
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return;

	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	if (!cfg->ref_cnt) {
		cfg->offset = rule->ep.user_def.offset;
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	}
	cfg->ref_cnt++;
}

static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
					 struct hclge_fd_rule *rule)
{
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return;

	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	if (!cfg->ref_cnt)
		return;

	cfg->ref_cnt--;
	if (!cfg->ref_cnt) {
		cfg->offset = 0;
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	}
}

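/* Central place for fd_rule_list updates: replace or delete an existing
 * node, or insert a new one in location order, keeping the user-def
 * refcounts and the rule counters consistent. Called with fd_rule_lock
 * held.
 */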
static void hclge_update_fd_list(struct hclge_dev *hdev,
				 enum HCLGE_FD_NODE_STATE state, u16 location,
				 struct hclge_fd_rule *new_rule)
{
	struct hlist_head *hlist = &hdev->fd_rule_list;
	struct hclge_fd_rule *fd_rule, *parent = NULL;

	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
	if (fd_rule) {
		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
		if (state == HCLGE_FD_ACTIVE)
			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
		hclge_sync_fd_user_def_cfg(hdev, true);

		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
		return;
	}

	/* it's unlikely to fail here, because we have checked that the rule
	 * exists before.
	 */
	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
		dev_warn(&hdev->pdev->dev,
			 "failed to delete fd rule %u, it does not exist\n",
			 location);
		return;
	}

	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
	hclge_sync_fd_user_def_cfg(hdev, true);

	hclge_fd_insert_rule_node(hlist, new_rule, parent);
	hclge_fd_inc_rule_cnt(hdev, new_rule->location);

	if (state == HCLGE_FD_TO_ADD) {
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		hclge_task_schedule(hdev, 0);
	}
}

static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return ret;
}

static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}

static int hclge_set_fd_key_config(struct hclge_dev *hdev,
				   enum HCLGE_FD_STAGE stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}

static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
{
	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;

	spin_lock_bh(&hdev->fd_rule_lock);
	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
	spin_unlock_bh(&hdev->fd_rule_lock);

	hclge_fd_set_user_def_cmd(hdev, cfg);
}

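/* Query the fd mode and the per-stage entry/counter allocation from
 * firmware, then build the stage-1 key configuration: which tuples are
 * active, how the IPv6 address words are selected, and which meta data
 * fields are matched.
 */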
static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS 0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %u\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If the max 400-bit key is used, we can also support the src/dst
	 * mac tuples.
	 */
	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
	}

	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}

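/* A TCAM entry is wider than one command descriptor, so the key is split
 * across three chained descriptors. sel_x selects whether the x or the y
 * half of the key is written; the entry valid bit is only set on the x
 * write, and only when adding.
 */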
static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}

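/* Program the action (AD) table entry for a rule. The bits set before the
 * 32-bit shift (rule id write-back and TC override) end up in the upper
 * half of the 64-bit ad_data word; the drop/queue/counter action bits are
 * packed into the lower half.
 */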
static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
			      action->override_tc);
		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
	}
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}

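/* Convert one tuple of the rule into its TCAM x/y key representation
 * according to the tuple's key option (u8, le16, le32, MAC or IP),
 * honouring the per-rule mask. A tuple marked unused is skipped, which
 * leaves its key bytes zeroed (match-all). Returns false only for an
 * unknown key option.
 */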
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	int offset, moffset, ip_offset;
	enum HCLGE_FD_KEY_OPT key_opt;
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	u8 *p = (u8 *)rule;
	int i;

	if (rule->unused_tuple & BIT(tuple_bit))
		return true;

	key_opt = tuple_key_info[tuple_bit].key_opt;
	offset = tuple_key_info[tuple_bit].offset;
	moffset = tuple_key_info[tuple_bit].moffset;

	switch (key_opt) {
	case KEY_OPT_U8:
		calc_x(*key_x, p[offset], p[moffset]);
		calc_y(*key_y, p[offset], p[moffset]);

		return true;
	case KEY_OPT_LE16:
		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case KEY_OPT_LE32:
		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case KEY_OPT_MAC:
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
			       p[moffset + i]);
			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
			       p[moffset + i]);
		}

		return true;
	case KEY_OPT_IP:
		ip_offset = IPV4_INDEX * sizeof(u32);
		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
		       *(u32 *)(&p[moffset + ip_offset]));
		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
		       *(u32 *)(&p[moffset + ip_offset]));
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	default:
		return false;
	}
}

static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}

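/* Pack the active meta data fields (packet type and destination vport)
 * into a bit field and convert it to the x/y key format; the result is
 * left-aligned within the 32-bit meta data word before being written to
 * the key.
 */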
static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	unsigned int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}

/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region, unused bits will be filled with 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	u8 meta_data_region;
	u8 tuple_size;
	int ret;
	u32 i;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0; i < MAX_TUPLE; i++) {
		bool tuple_valid;

		tuple_size = tuple_key_info[i].key_length / 8;
		if (!(key_cfg->tuple_active & BIT(i)))
			continue;

		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			   MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->location, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->location, ret);
	return ret;
}

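/* Build the AD data for a rule: drop the packet, forward to the queue
 * range of a TC (SELECT_TC), or forward to an explicit queue; a per-vf
 * counter is attached when stage-1 counters are available.
 */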
static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_fd_ad_data ad_data;

	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
		ad_data.override_tc = true;
		ad_data.queue_id =
			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
		ad_data.tc_size =
			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
	} else {
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
		ad_data.use_counter = true;
		ad_data.counter_id = rule->vf_id %
				     hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
	} else {
		ad_data.use_counter = false;
		ad_data.counter_id = 0;
	}

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}

static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
				       u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

	if (!spec->ip4src)
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (!spec->ip4dst)
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->psrc)
		*unused_tuple |= BIT(INNER_SRC_PORT);

	if (!spec->pdst)
		*unused_tuple |= BIT(INNER_DST_PORT);

	if (!spec->tos)
		*unused_tuple |= BIT(INNER_IP_TOS);

	return 0;
}

static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
				    u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	if (!spec->ip4src)
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (!spec->ip4dst)
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->tos)
		*unused_tuple |= BIT(INNER_IP_TOS);

	if (!spec->proto)
		*unused_tuple |= BIT(INNER_IP_PROTO);

	if (spec->l4_4_bytes)
		return -EOPNOTSUPP;

	if (spec->ip_ver != ETH_RX_NFC_IP4)
		return -EOPNOTSUPP;

	return 0;
}

static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
				       u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

	/* check whether the src/dst ip addresses are used */
	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->psrc)
		*unused_tuple |= BIT(INNER_SRC_PORT);

	if (!spec->pdst)
		*unused_tuple |= BIT(INNER_DST_PORT);

	if (!spec->tclass)
		*unused_tuple |= BIT(INNER_IP_TOS);

	return 0;
}

static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
				    u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* check whether the src/dst ip addresses are used */
	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->l4_proto)
		*unused_tuple |= BIT(INNER_IP_PROTO);

	if (!spec->tclass)
		*unused_tuple |= BIT(INNER_IP_TOS);

	if (spec->l4_4_bytes)
		return -EOPNOTSUPP;

	return 0;
}

static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
			 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
			 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

	if (is_zero_ether_addr(spec->h_source))
		*unused_tuple |= BIT(INNER_SRC_MAC);

	if (is_zero_ether_addr(spec->h_dest))
		*unused_tuple |= BIT(INNER_DST_MAC);

	if (!spec->h_proto)
		*unused_tuple |= BIT(INNER_ETH_TYPE);

	return 0;
}

static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
				    struct ethtool_rx_flow_spec *fs,
				    u32 *unused_tuple)
{
	if (fs->flow_type & FLOW_EXT) {
		if (fs->h_ext.vlan_etype) {
			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
			return -EOPNOTSUPP;
		}

		if (!fs->h_ext.vlan_tci)
			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci &&
		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
			dev_err(&hdev->pdev->dev,
				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
			return -EINVAL;
		}
	} else {
		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (hdev->fd_cfg.fd_mode !=
		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
			dev_err(&hdev->pdev->dev,
				"FLOW_MAC_EXT is not supported in current fd mode!\n");
			return -EOPNOTSUPP;
		}

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused_tuple |= BIT(INNER_DST_MAC);
		else
			*unused_tuple &= ~BIT(INNER_DST_MAC);
	}

	return 0;
}

static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
				       struct hclge_fd_user_def_info *info)
{
	switch (flow_type) {
	case ETHER_FLOW:
		info->layer = HCLGE_FD_USER_DEF_L2;
		*unused_tuple &= ~BIT(INNER_L2_RSV);
		break;
	case IP_USER_FLOW:
	case IPV6_USER_FLOW:
		info->layer = HCLGE_FD_USER_DEF_L3;
		*unused_tuple &= ~BIT(INNER_L3_RSV);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		info->layer = HCLGE_FD_USER_DEF_L4;
		*unused_tuple &= ~BIT(INNER_L4_RSV);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
{
	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
}

static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
					 struct ethtool_rx_flow_spec *fs,
					 u32 *unused_tuple,
					 struct hclge_fd_user_def_info *info)
{
	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
	u16 data, offset, data_mask, offset_mask;
	int ret;

	info->layer = HCLGE_FD_USER_DEF_NONE;
	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;

	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
		return 0;

	/* user-def data from ethtool is a 64 bit value, bits 0~15 are used
	 * for the data and bits 32~47 for the offset.
	 */
	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;

	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
		dev_err(&hdev->pdev->dev,
			"user-def offset[%u] should be no more than %u\n",
			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
		return -EINVAL;
	}

	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
		return -EINVAL;
	}

	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"unsupported flow type for user-def bytes, ret = %d\n",
			ret);
		return ret;
	}

	info->data = data;
	info->data_mask = data_mask;
	info->offset = offset;

	return 0;
}

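/* Validate an ethtool flow spec before it is turned into a rule: check the
 * location range, the user-def field, the flow-type specific tuples and
 * the FLOW_EXT/FLOW_MAC_EXT extensions. unused_tuple collects the tuples
 * the rule does not match on.
 */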
static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs,
			       u32 *unused_tuple,
			       struct hclge_fd_user_def_info *info)
{
	u32 flow_type;
	int ret;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
		dev_err(&hdev->pdev->dev,
			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
			fs->location,
			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
		return -EINVAL;
	}

	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
	if (ret)
		return ret;

	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
						  unused_tuple);
		break;
	case IP_USER_FLOW:
		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
					       unused_tuple);
		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
						  unused_tuple);
		break;
	case IPV6_USER_FLOW:
		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
					       unused_tuple);
		break;
	case ETHER_FLOW:
		if (hdev->fd_cfg.fd_mode !=
		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
			dev_err(&hdev->pdev->dev,
				"ETHER_FLOW is not supported in current fd mode!\n");
			return -EOPNOTSUPP;
		}

		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
						 unused_tuple);
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported protocol type, protocol type = %#x\n",
			flow_type);
		return -EOPNOTSUPP;
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check flow union tuple, ret = %d\n",
			ret);
		return ret;
	}

	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
}

static void hclge_fd_get_tcpip4_tuple(struct ethtool_rx_flow_spec *fs,
				      struct hclge_fd_rule *rule, u8 ip_proto)
{
	rule->tuples.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
	rule->tuples_mask.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

	rule->tuples.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
	rule->tuples_mask.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

	rule->tuples.ether_proto = ETH_P_IP;
	rule->tuples_mask.ether_proto = 0xFFFF;

	rule->tuples.ip_proto = ip_proto;
	rule->tuples_mask.ip_proto = 0xFF;
}

static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs,
				   struct hclge_fd_rule *rule)
{
	rule->tuples.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
	rule->tuples_mask.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

	rule->tuples.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
	rule->tuples_mask.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

	rule->tuples.ether_proto = ETH_P_IP;
	rule->tuples_mask.ether_proto = 0xFFFF;
}

static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs,
				      struct hclge_fd_rule *rule, u8 ip_proto)
{
	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
			  IPV6_SIZE);
	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
			  IPV6_SIZE);

	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
			  IPV6_SIZE);
	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
			  IPV6_SIZE);

	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);

	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);

	rule->tuples.ether_proto = ETH_P_IPV6;
	rule->tuples_mask.ether_proto = 0xFFFF;

	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;

	rule->tuples.ip_proto = ip_proto;
	rule->tuples_mask.ip_proto = 0xFF;
}

static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs,
				   struct hclge_fd_rule *rule)
{
	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
			  IPV6_SIZE);
	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
			  IPV6_SIZE);

	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
			  IPV6_SIZE);
	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
			  IPV6_SIZE);

	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

	rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
	rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;

	rule->tuples.ether_proto = ETH_P_IPV6;
	rule->tuples_mask.ether_proto = 0xFFFF;
}

static void hclge_fd_get_ether_tuple(struct ethtool_rx_flow_spec *fs,
				     struct hclge_fd_rule *rule)
{
	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);

	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);

	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
}

static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
					struct hclge_fd_rule *rule)
{
	switch (info->layer) {
	case HCLGE_FD_USER_DEF_L2:
		rule->tuples.l2_user_def = info->data;
		rule->tuples_mask.l2_user_def = info->data_mask;
		break;
	case HCLGE_FD_USER_DEF_L3:
		rule->tuples.l3_user_def = info->data;
		rule->tuples_mask.l3_user_def = info->data_mask;
		break;
	case HCLGE_FD_USER_DEF_L4:
		rule->tuples.l4_user_def = (u32)info->data << 16;
		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
		break;
	default:
		break;
	}

	rule->ep.user_def = *info;
}

static int hclge_fd_get_tuple(struct ethtool_rx_flow_spec *fs,
			      struct hclge_fd_rule *rule,
			      struct hclge_fd_user_def_info *info)
{
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case SCTP_V4_FLOW:
		hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_SCTP);
		break;
	case TCP_V4_FLOW:
		hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_TCP);
		break;
	case UDP_V4_FLOW:
		hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_UDP);
		break;
	case IP_USER_FLOW:
		hclge_fd_get_ip4_tuple(fs, rule);
		break;
	case SCTP_V6_FLOW:
		hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_SCTP);
		break;
	case TCP_V6_FLOW:
		hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_TCP);
		break;
	case UDP_V6_FLOW:
		hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_UDP);
		break;
	case IPV6_USER_FLOW:
		hclge_fd_get_ip6_tuple(fs, rule);
		break;
	case ETHER_FLOW:
		hclge_fd_get_ether_tuple(fs, rule);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (fs->flow_type & FLOW_EXT) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
		hclge_fd_get_user_def_tuple(info, rule);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
	}

	return 0;
}

static int hclge_fd_config_rule(struct hclge_dev *hdev,
				struct hclge_fd_rule *rule)
{
	int ret;

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		return ret;

	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
}

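/* Add a rule under fd_rule_lock. Ethtool and tc flower rules are mutually
 * exclusive, so a rule whose type conflicts with the active type is
 * rejected; aRFS rules are flushed before the new rule is written to
 * hardware and tracked in fd_rule_list.
 */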
static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
				     struct hclge_fd_rule *rule)
{
	int ret;

	spin_lock_bh(&hdev->fd_rule_lock);

	if (hdev->fd_active_type != rule->rule_type &&
	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
		dev_err(&hdev->pdev->dev,
			"mode conflict(new type %d, active type %d), please delete existing rules first\n",
			rule->rule_type, hdev->fd_active_type);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EINVAL;
	}

	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
	if (ret)
		goto out;

	ret = hclge_clear_arfs_rules(hdev);
	if (ret)
		goto out;

	ret = hclge_fd_config_rule(hdev, rule);
	if (ret)
		goto out;

	rule->state = HCLGE_FD_ACTIVE;
	hdev->fd_active_type = rule->rule_type;
	hclge_update_fd_list(hdev, rule->state, rule->location, rule);

out:
	spin_unlock_bh(&hdev->fd_rule_lock);
	return ret;
}

static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
}

static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
				      u16 *vport_id, u8 *action, u16 *queue_id)
{
	struct hclge_vport *vport = hdev->vport;

	if (ring_cookie == RX_CLS_FLOW_DISC) {
		*action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		u16 tqps;

		/* To stay consistent with the user's configuration, subtract
		 * 1 when printing 'vf', because the vf id from ethtool is
		 * the vf number plus 1.
		 */
		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%u) should be less than %u\n",
				vf - 1U, hdev->num_req_vfs);
			return -EINVAL;
		}

		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = hdev->vport[vf].nic.kinfo.num_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%u) > max queue id (%u)\n",
				ring, tqps - 1U);
			return -EINVAL;
		}

		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
		*queue_id = ring;
	}

	return 0;
}

static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_user_def_info info;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		dev_err(&hdev->pdev->dev,
			"flow director is not supported\n");
		return -EOPNOTSUPP;
	}

	if (!hdev->fd_en) {
		dev_err(&hdev->pdev->dev,
			"please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
	if (ret)
		return ret;

	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
					 &action, &q_index);
	if (ret)
		return ret;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(fs, rule, &info);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->flow_type = fs->flow_type;
	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;
	rule->rule_type = HCLGE_FD_EP_ACTIVE;

	ret = hclge_add_fd_entry_common(hdev, rule);
	if (ret)
		kfree(rule);

	return ret;
}

static int hclge_del_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	int ret;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
	    !test_bit(fs->location, hdev->fd_bmap)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %u does not exist\n", fs->location);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
				   NULL, false);
	if (ret)
		goto out;

	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);

out:
	spin_unlock_bh(&hdev->fd_rule_lock);
	return ret;
}

static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
					 bool clear_list)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	u16 location;

	spin_lock_bh(&hdev->fd_rule_lock);

	for_each_set_bit(location, hdev->fd_bmap,
			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
				     NULL, false);

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hlist_del(&rule->rule_node);
			kfree(rule);
		}
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
		hdev->hclge_fd_rule_num = 0;
		bitmap_zero(hdev->fd_bmap,
			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
	}

	spin_unlock_bh(&hdev->fd_rule_lock);
}

static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
{
	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return;

	hclge_clear_fd_rules_in_list(hdev, true);
	hclge_fd_disable_user_def(hdev);
}

static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	/* Return ok here, because reset error handling will check this
	 * return value. If an error is returned here, the reset process
	 * will fail.
	 */
	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return 0;

	/* if fd is disabled, it should not be restored during reset */
	if (!hdev->fd_en)
		return 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->state == HCLGE_FD_ACTIVE)
			rule->state = HCLGE_FD_TO_ADD;
	}
	spin_unlock_bh(&hdev->fd_rule_lock);
	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);

	return 0;
}

static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) ||
	    hclge_is_cls_flower_active(handle))
		return -EOPNOTSUPP;

	cmd->rule_cnt = hdev->hclge_fd_rule_num;
	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	return 0;
}

static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
				     struct ethtool_tcpip4_spec *spec,
				     struct ethtool_tcpip4_spec *spec_mask)
{
	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
			    0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
			    0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

	spec->psrc = cpu_to_be16(rule->tuples.src_port);
	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
			  0 : cpu_to_be16(rule->tuples_mask.src_port);

	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
			  0 : cpu_to_be16(rule->tuples_mask.dst_port);

	spec->tos = rule->tuples.ip_tos;
	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			 0 : rule->tuples_mask.ip_tos;
}

static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
				  struct ethtool_usrip4_spec *spec,
				  struct ethtool_usrip4_spec *spec_mask)
{
	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
			    0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
			    0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

	spec->tos = rule->tuples.ip_tos;
	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			 0 : rule->tuples_mask.ip_tos;

	spec->proto = rule->tuples.ip_proto;
	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
			   0 : rule->tuples_mask.ip_proto;

	spec->ip_ver = ETH_RX_NFC_IP4;
}
6686
6687 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6688 struct ethtool_tcpip6_spec *spec,
6689 struct ethtool_tcpip6_spec *spec_mask)
6690 {
6691 cpu_to_be32_array(spec->ip6src,
6692 rule->tuples.src_ip, IPV6_SIZE);
6693 cpu_to_be32_array(spec->ip6dst,
6694 rule->tuples.dst_ip, IPV6_SIZE);
6695 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6696 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6697 else
6698 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6699 IPV6_SIZE);
6700
6701 if (rule->unused_tuple & BIT(INNER_DST_IP))
6702 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6703 else
6704 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6705 IPV6_SIZE);
6706
6707 spec->tclass = rule->tuples.ip_tos;
6708 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6709 0 : rule->tuples_mask.ip_tos;
6710
6711 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6712 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6713 0 : cpu_to_be16(rule->tuples_mask.src_port);
6714
6715 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6716 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6717 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6718 }
6719
6720 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6721 struct ethtool_usrip6_spec *spec,
6722 struct ethtool_usrip6_spec *spec_mask)
6723 {
6724 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6725 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6726 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6727 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6728 else
6729 cpu_to_be32_array(spec_mask->ip6src,
6730 rule->tuples_mask.src_ip, IPV6_SIZE);
6731
6732 if (rule->unused_tuple & BIT(INNER_DST_IP))
6733 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6734 else
6735 cpu_to_be32_array(spec_mask->ip6dst,
6736 rule->tuples_mask.dst_ip, IPV6_SIZE);
6737
6738 spec->tclass = rule->tuples.ip_tos;
6739 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6740 0 : rule->tuples_mask.ip_tos;
6741
6742 spec->l4_proto = rule->tuples.ip_proto;
6743 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6744 0 : rule->tuples_mask.ip_proto;
6745 }
6746
6747 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6748 struct ethhdr *spec,
6749 struct ethhdr *spec_mask)
6750 {
6751 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6752 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6753
6754 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6755 eth_zero_addr(spec_mask->h_source);
6756 else
6757 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6758
6759 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6760 eth_zero_addr(spec_mask->h_dest);
6761 else
6762 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6763
6764 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6765 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6766 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6767 }
6768
6769 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6770 struct hclge_fd_rule *rule)
6771 {
6772 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6773 HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6774 fs->h_ext.data[0] = 0;
6775 fs->h_ext.data[1] = 0;
6776 fs->m_ext.data[0] = 0;
6777 fs->m_ext.data[1] = 0;
6778 } else {
6779 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6780 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6781 fs->m_ext.data[0] =
6782 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6783 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6784 }
6785 }
6786
6787 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6788 struct hclge_fd_rule *rule)
6789 {
6790 if (fs->flow_type & FLOW_EXT) {
6791 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6792 fs->m_ext.vlan_tci =
6793 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6794 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6795
6796 hclge_fd_get_user_def_info(fs, rule);
6797 }
6798
6799 if (fs->flow_type & FLOW_MAC_EXT) {
6800 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6801 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6802 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6803 else
6804 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6805 rule->tuples_mask.dst_mac);
6806 }
6807 }
6808
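/* The rule list is kept sorted by location in ascending order, so the
 * lookup can stop as soon as a higher location is seen.
 */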
6809 static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
6810 u16 location)
6811 {
6812 struct hclge_fd_rule *rule = NULL;
6813 struct hlist_node *node2;
6814
6815 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6816 if (rule->location == location)
6817 return rule;
6818 else if (rule->location > location)
6819 return NULL;
6820 }
6821
6822 return NULL;
6823 }
6824
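/* Encode the rule action into the ethtool ring cookie: drop rules map
 * to RX_CLS_FLOW_DISC, otherwise the queue id is combined with the
 * vf id shifted into the upper bits.
 */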
6825 static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
6826 struct hclge_fd_rule *rule)
6827 {
6828 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6829 fs->ring_cookie = RX_CLS_FLOW_DISC;
6830 } else {
6831 u64 vf_id;
6832
6833 fs->ring_cookie = rule->queue_id;
6834 vf_id = rule->vf_id;
6835 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6836 fs->ring_cookie |= vf_id;
6837 }
6838 }
6839
6840 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6841 struct ethtool_rxnfc *cmd)
6842 {
6843 struct hclge_vport *vport = hclge_get_vport(handle);
6844 struct hclge_fd_rule *rule = NULL;
6845 struct hclge_dev *hdev = vport->back;
6846 struct ethtool_rx_flow_spec *fs;
6847
6848 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6849 return -EOPNOTSUPP;
6850
6851 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6852
6853 spin_lock_bh(&hdev->fd_rule_lock);
6854
6855 rule = hclge_get_fd_rule(hdev, fs->location);
6856 if (!rule) {
6857 spin_unlock_bh(&hdev->fd_rule_lock);
6858 return -ENOENT;
6859 }
6860
6861 fs->flow_type = rule->flow_type;
6862 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6863 case SCTP_V4_FLOW:
6864 case TCP_V4_FLOW:
6865 case UDP_V4_FLOW:
6866 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6867 &fs->m_u.tcp_ip4_spec);
6868 break;
6869 case IP_USER_FLOW:
6870 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6871 &fs->m_u.usr_ip4_spec);
6872 break;
6873 case SCTP_V6_FLOW:
6874 case TCP_V6_FLOW:
6875 case UDP_V6_FLOW:
6876 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6877 &fs->m_u.tcp_ip6_spec);
6878 break;
6879 case IPV6_USER_FLOW:
6880 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6881 &fs->m_u.usr_ip6_spec);
6882 break;
6883 /* The flow type of the fd rule has been checked before it is added
6884 * into the rule list. As the other flow types have been handled, it
6885 * must be ETHER_FLOW for the default case.
6886 */
6887 default:
6888 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6889 &fs->m_u.ether_spec);
6890 break;
6891 }
6892
6893 hclge_fd_get_ext_info(fs, rule);
6894
6895 hclge_fd_get_ring_cookie(fs, rule);
6896
6897 spin_unlock_bh(&hdev->fd_rule_lock);
6898
6899 return 0;
6900 }
6901
6902 static int hclge_get_all_rules(struct hnae3_handle *handle,
6903 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6904 {
6905 struct hclge_vport *vport = hclge_get_vport(handle);
6906 struct hclge_dev *hdev = vport->back;
6907 struct hclge_fd_rule *rule;
6908 struct hlist_node *node2;
6909 int cnt = 0;
6910
6911 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6912 return -EOPNOTSUPP;
6913
6914 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6915
6916 spin_lock_bh(&hdev->fd_rule_lock);
6917 hlist_for_each_entry_safe(rule, node2,
6918 &hdev->fd_rule_list, rule_node) {
6919 if (cnt == cmd->rule_cnt) {
6920 spin_unlock_bh(&hdev->fd_rule_lock);
6921 return -EMSGSIZE;
6922 }
6923
6924 if (rule->state == HCLGE_FD_TO_DEL)
6925 continue;
6926
6927 rule_locs[cnt] = rule->location;
6928 cnt++;
6929 }
6930
6931 spin_unlock_bh(&hdev->fd_rule_lock);
6932
6933 cmd->rule_cnt = cnt;
6934
6935 return 0;
6936 }
6937
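/* Extract the tuple fields used by aRFS from the dissected flow keys,
 * converting them from network to host byte order.
 */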
6938 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6939 struct hclge_fd_rule_tuples *tuples)
6940 {
6941 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6942 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6943
6944 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6945 tuples->ip_proto = fkeys->basic.ip_proto;
6946 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6947
6948 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6949 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6950 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6951 } else {
6952 int i;
6953
6954 for (i = 0; i < IPV6_SIZE; i++) {
6955 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6956 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6957 }
6958 }
6959 }
6960
6961 /* traverse all rules, checking whether an existing rule has the same tuples */
6962 static struct hclge_fd_rule *
6963 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6964 const struct hclge_fd_rule_tuples *tuples)
6965 {
6966 struct hclge_fd_rule *rule = NULL;
6967 struct hlist_node *node;
6968
6969 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6970 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6971 return rule;
6972 }
6973
6974 return NULL;
6975 }
6976
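/* An aRFS rule matches on ether proto, ip proto, source/destination ip
 * and destination port; mac, vlan, tos and source port are marked as
 * unused tuples.
 */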
6977 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6978 struct hclge_fd_rule *rule)
6979 {
6980 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6981 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6982 BIT(INNER_SRC_PORT);
6983 rule->action = 0;
6984 rule->vf_id = 0;
6985 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6986 rule->state = HCLGE_FD_TO_ADD;
6987 if (tuples->ether_proto == ETH_P_IP) {
6988 if (tuples->ip_proto == IPPROTO_TCP)
6989 rule->flow_type = TCP_V4_FLOW;
6990 else
6991 rule->flow_type = UDP_V4_FLOW;
6992 } else {
6993 if (tuples->ip_proto == IPPROTO_TCP)
6994 rule->flow_type = TCP_V6_FLOW;
6995 else
6996 rule->flow_type = UDP_V6_FLOW;
6997 }
6998 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6999 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7000 }
7001
7002 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7003 u16 flow_id, struct flow_keys *fkeys)
7004 {
7005 struct hclge_vport *vport = hclge_get_vport(handle);
7006 struct hclge_fd_rule_tuples new_tuples = {};
7007 struct hclge_dev *hdev = vport->back;
7008 struct hclge_fd_rule *rule;
7009 u16 bit_id;
7010
7011 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7012 return -EOPNOTSUPP;
7013
7014 /* if there is already an fd rule added by the user,
7015 * arfs should not work
7016 */
7017 spin_lock_bh(&hdev->fd_rule_lock);
7018 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7019 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7020 spin_unlock_bh(&hdev->fd_rule_lock);
7021 return -EOPNOTSUPP;
7022 }
7023
7024 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7025
7026 /* check whether a flow director filter exists for this flow:
7027 * if not, create a new filter for it;
7028 * if a filter exists with a different queue id, modify the filter;
7029 * if a filter exists with the same queue id, do nothing
7030 */
7031 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7032 if (!rule) {
7033 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7034 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7035 spin_unlock_bh(&hdev->fd_rule_lock);
7036 return -ENOSPC;
7037 }
7038
7039 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7040 if (!rule) {
7041 spin_unlock_bh(&hdev->fd_rule_lock);
7042 return -ENOMEM;
7043 }
7044
7045 rule->location = bit_id;
7046 rule->arfs.flow_id = flow_id;
7047 rule->queue_id = queue_id;
7048 hclge_fd_build_arfs_rule(&new_tuples, rule);
7049 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7050 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7051 } else if (rule->queue_id != queue_id) {
7052 rule->queue_id = queue_id;
7053 rule->state = HCLGE_FD_TO_ADD;
7054 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7055 hclge_task_schedule(hdev, 0);
7056 }
7057 spin_unlock_bh(&hdev->fd_rule_lock);
7058 return rule->location;
7059 }
7060
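/* Mark aRFS rules whose flows have expired (per rps_may_expire_flow())
 * as TO_DEL; the periodic service task removes them from hardware.
 */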
7061 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7062 {
7063 #ifdef CONFIG_RFS_ACCEL
7064 struct hnae3_handle *handle = &hdev->vport[0].nic;
7065 struct hclge_fd_rule *rule;
7066 struct hlist_node *node;
7067
7068 spin_lock_bh(&hdev->fd_rule_lock);
7069 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7070 spin_unlock_bh(&hdev->fd_rule_lock);
7071 return;
7072 }
7073 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7074 if (rule->state != HCLGE_FD_ACTIVE)
7075 continue;
7076 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7077 rule->arfs.flow_id, rule->location)) {
7078 rule->state = HCLGE_FD_TO_DEL;
7079 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7080 }
7081 }
7082 spin_unlock_bh(&hdev->fd_rule_lock);
7083 #endif
7084 }
7085
7086 /* must be called with fd_rule_lock held */
7087 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7088 {
7089 #ifdef CONFIG_RFS_ACCEL
7090 struct hclge_fd_rule *rule;
7091 struct hlist_node *node;
7092 int ret;
7093
7094 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7095 return 0;
7096
7097 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7098 switch (rule->state) {
7099 case HCLGE_FD_TO_DEL:
7100 case HCLGE_FD_ACTIVE:
7101 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7102 rule->location, NULL, false);
7103 if (ret)
7104 return ret;
7105 fallthrough;
7106 case HCLGE_FD_TO_ADD:
7107 hclge_fd_dec_rule_cnt(hdev, rule->location);
7108 hlist_del(&rule->rule_node);
7109 kfree(rule);
7110 break;
7111 default:
7112 break;
7113 }
7114 }
7115 hclge_sync_fd_state(hdev);
7116
7117 #endif
7118 return 0;
7119 }
7120
7121 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7122 struct hclge_fd_rule *rule)
7123 {
7124 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7125 struct flow_match_basic match;
7126 u16 ethtype_key, ethtype_mask;
7127
7128 flow_rule_match_basic(flow, &match);
7129 ethtype_key = ntohs(match.key->n_proto);
7130 ethtype_mask = ntohs(match.mask->n_proto);
7131
7132 if (ethtype_key == ETH_P_ALL) {
7133 ethtype_key = 0;
7134 ethtype_mask = 0;
7135 }
7136 rule->tuples.ether_proto = ethtype_key;
7137 rule->tuples_mask.ether_proto = ethtype_mask;
7138 rule->tuples.ip_proto = match.key->ip_proto;
7139 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7140 } else {
7141 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7142 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7143 }
7144 }
7145
7146 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7147 struct hclge_fd_rule *rule)
7148 {
7149 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7150 struct flow_match_eth_addrs match;
7151
7152 flow_rule_match_eth_addrs(flow, &match);
7153 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7154 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7155 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7156 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7157 } else {
7158 rule->unused_tuple |= BIT(INNER_DST_MAC);
7159 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7160 }
7161 }
7162
7163 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7164 struct hclge_fd_rule *rule)
7165 {
7166 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7167 struct flow_match_vlan match;
7168
7169 flow_rule_match_vlan(flow, &match);
7170 rule->tuples.vlan_tag1 = match.key->vlan_id |
7171 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7172 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7173 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7174 } else {
7175 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7176 }
7177 }
7178
7179 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7180 struct hclge_fd_rule *rule)
7181 {
7182 u16 addr_type = 0;
7183
7184 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7185 struct flow_match_control match;
7186
7187 flow_rule_match_control(flow, &match);
7188 addr_type = match.key->addr_type;
7189 }
7190
7191 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7192 struct flow_match_ipv4_addrs match;
7193
7194 flow_rule_match_ipv4_addrs(flow, &match);
7195 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7196 rule->tuples_mask.src_ip[IPV4_INDEX] =
7197 be32_to_cpu(match.mask->src);
7198 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7199 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7200 be32_to_cpu(match.mask->dst);
7201 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7202 struct flow_match_ipv6_addrs match;
7203
7204 flow_rule_match_ipv6_addrs(flow, &match);
7205 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7206 IPV6_SIZE);
7207 be32_to_cpu_array(rule->tuples_mask.src_ip,
7208 match.mask->src.s6_addr32, IPV6_SIZE);
7209 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7210 IPV6_SIZE);
7211 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7212 match.mask->dst.s6_addr32, IPV6_SIZE);
7213 } else {
7214 rule->unused_tuple |= BIT(INNER_SRC_IP);
7215 rule->unused_tuple |= BIT(INNER_DST_IP);
7216 }
7217 }
7218
7219 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7220 struct hclge_fd_rule *rule)
7221 {
7222 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7223 struct flow_match_ports match;
7224
7225 flow_rule_match_ports(flow, &match);
7226
7227 rule->tuples.src_port = be16_to_cpu(match.key->src);
7228 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7229 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7230 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7231 } else {
7232 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7233 rule->unused_tuple |= BIT(INNER_DST_PORT);
7234 }
7235 }
7236
7237 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7238 struct flow_cls_offload *cls_flower,
7239 struct hclge_fd_rule *rule)
7240 {
7241 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7242 struct flow_dissector *dissector = flow->match.dissector;
7243
7244 if (dissector->used_keys &
7245 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
7246 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
7247 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7248 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
7249 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7250 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7251 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) {
7252 dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n",
7253 dissector->used_keys);
7254 return -EOPNOTSUPP;
7255 }
7256
7257 hclge_get_cls_key_basic(flow, rule);
7258 hclge_get_cls_key_mac(flow, rule);
7259 hclge_get_cls_key_vlan(flow, rule);
7260 hclge_get_cls_key_ip(flow, rule);
7261 hclge_get_cls_key_port(flow, rule);
7262
7263 return 0;
7264 }
7265
7266 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7267 struct flow_cls_offload *cls_flower, int tc)
7268 {
7269 u32 prio = cls_flower->common.prio;
7270
7271 if (tc < 0 || tc > hdev->tc_max) {
7272 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7273 return -EINVAL;
7274 }
7275
7276 if (prio == 0 ||
7277 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7278 dev_err(&hdev->pdev->dev,
7279 "prio %u should be in range[1, %u]\n",
7280 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7281 return -EINVAL;
7282 }
7283
7284 if (test_bit(prio - 1, hdev->fd_bmap)) {
7285 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7286 return -EINVAL;
7287 }
7288 return 0;
7289 }
7290
7291 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7292 struct flow_cls_offload *cls_flower,
7293 int tc)
7294 {
7295 struct hclge_vport *vport = hclge_get_vport(handle);
7296 struct hclge_dev *hdev = vport->back;
7297 struct hclge_fd_rule *rule;
7298 int ret;
7299
7300 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
7301 dev_err(&hdev->pdev->dev,
7302 "cls flower is not supported\n");
7303 return -EOPNOTSUPP;
7304 }
7305
7306 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7307 if (ret) {
7308 dev_err(&hdev->pdev->dev,
7309 "failed to check cls flower params, ret = %d\n", ret);
7310 return ret;
7311 }
7312
7313 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7314 if (!rule)
7315 return -ENOMEM;
7316
7317 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7318 if (ret) {
7319 kfree(rule);
7320 return ret;
7321 }
7322
7323 rule->action = HCLGE_FD_ACTION_SELECT_TC;
7324 rule->cls_flower.tc = tc;
7325 rule->location = cls_flower->common.prio - 1;
7326 rule->vf_id = 0;
7327 rule->cls_flower.cookie = cls_flower->cookie;
7328 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7329
7330 ret = hclge_add_fd_entry_common(hdev, rule);
7331 if (ret)
7332 kfree(rule);
7333
7334 return ret;
7335 }
7336
7337 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7338 unsigned long cookie)
7339 {
7340 struct hclge_fd_rule *rule;
7341 struct hlist_node *node;
7342
7343 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7344 if (rule->cls_flower.cookie == cookie)
7345 return rule;
7346 }
7347
7348 return NULL;
7349 }
7350
7351 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7352 struct flow_cls_offload *cls_flower)
7353 {
7354 struct hclge_vport *vport = hclge_get_vport(handle);
7355 struct hclge_dev *hdev = vport->back;
7356 struct hclge_fd_rule *rule;
7357 int ret;
7358
7359 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7360 return -EOPNOTSUPP;
7361
7362 spin_lock_bh(&hdev->fd_rule_lock);
7363
7364 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7365 if (!rule) {
7366 spin_unlock_bh(&hdev->fd_rule_lock);
7367 return -EINVAL;
7368 }
7369
7370 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7371 NULL, false);
7372 if (ret) {
7373 /* if the tcam config fails, set the rule state to TO_DEL,
7374 * so the rule will be deleted when the periodic
7375 * task is scheduled.
7376 */
7377 hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL);
7378 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7379 spin_unlock_bh(&hdev->fd_rule_lock);
7380 return ret;
7381 }
7382
7383 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7384 spin_unlock_bh(&hdev->fd_rule_lock);
7385
7386 return 0;
7387 }
7388
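/* Push pending rule changes to hardware: program TO_ADD rules into the
 * TCAM and remove TO_DEL ones. On failure, the CHANGED bit is set
 * again so that a later service task retries the sync.
 */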
7389 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7390 {
7391 struct hclge_fd_rule *rule;
7392 struct hlist_node *node;
7393 int ret = 0;
7394
7395 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7396 return;
7397
7398 spin_lock_bh(&hdev->fd_rule_lock);
7399
7400 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7401 switch (rule->state) {
7402 case HCLGE_FD_TO_ADD:
7403 ret = hclge_fd_config_rule(hdev, rule);
7404 if (ret)
7405 goto out;
7406 rule->state = HCLGE_FD_ACTIVE;
7407 break;
7408 case HCLGE_FD_TO_DEL:
7409 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7410 rule->location, NULL, false);
7411 if (ret)
7412 goto out;
7413 hclge_fd_dec_rule_cnt(hdev, rule->location);
7414 hclge_fd_free_node(hdev, rule);
7415 break;
7416 default:
7417 break;
7418 }
7419 }
7420
7421 out:
7422 if (ret)
7423 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7424
7425 spin_unlock_bh(&hdev->fd_rule_lock);
7426 }
7427
7428 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7429 {
7430 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7431 return;
7432
7433 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7434 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7435
7436 hclge_clear_fd_rules_in_list(hdev, clear_list);
7437 }
7438
7439 hclge_sync_fd_user_def_cfg(hdev, false);
7440
7441 hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7442 }
7443
7444 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7445 {
7446 struct hclge_vport *vport = hclge_get_vport(handle);
7447 struct hclge_dev *hdev = vport->back;
7448
7449 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7450 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7451 }
7452
7453 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7454 {
7455 struct hclge_vport *vport = hclge_get_vport(handle);
7456 struct hclge_dev *hdev = vport->back;
7457
7458 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
7459 }
7460
7461 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7462 {
7463 struct hclge_vport *vport = hclge_get_vport(handle);
7464 struct hclge_dev *hdev = vport->back;
7465
7466 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7467 }
7468
7469 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7470 {
7471 struct hclge_vport *vport = hclge_get_vport(handle);
7472 struct hclge_dev *hdev = vport->back;
7473
7474 return hdev->rst_stats.hw_reset_done_cnt;
7475 }
7476
7477 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7478 {
7479 struct hclge_vport *vport = hclge_get_vport(handle);
7480 struct hclge_dev *hdev = vport->back;
7481
7482 hdev->fd_en = enable;
7483
7484 if (!enable)
7485 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7486 else
7487 hclge_restore_fd_entries(handle);
7488
7489 hclge_task_schedule(hdev, 0);
7490 }
7491
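/* Enable or disable MAC TX/RX. When enabling, padding, FCS
 * insertion/stripping and oversize truncation are turned on as well;
 * when disabling, wait for the MAC link to go down.
 */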
7492 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7493 {
7494 #define HCLGE_LINK_STATUS_WAIT_CNT 3
7495
7496 struct hclge_desc desc;
7497 struct hclge_config_mac_mode_cmd *req =
7498 (struct hclge_config_mac_mode_cmd *)desc.data;
7499 u32 loop_en = 0;
7500 int ret;
7501
7502 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7503
7504 if (enable) {
7505 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7506 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7507 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7508 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7509 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7510 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7511 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7512 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7513 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7514 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7515 }
7516
7517 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7518
7519 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7520 if (ret) {
7521 dev_err(&hdev->pdev->dev,
7522 "mac enable fail, ret =%d.\n", ret);
7523 return;
7524 }
7525
7526 if (!enable)
7527 hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN,
7528 HCLGE_LINK_STATUS_WAIT_CNT);
7529 }
7530
7531 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7532 u8 switch_param, u8 param_mask)
7533 {
7534 struct hclge_mac_vlan_switch_cmd *req;
7535 struct hclge_desc desc;
7536 u32 func_id;
7537 int ret;
7538
7539 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7540 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7541
7542 /* read current config parameter */
7543 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7544 true);
7545 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7546 req->func_id = cpu_to_le32(func_id);
7547
7548 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7549 if (ret) {
7550 dev_err(&hdev->pdev->dev,
7551 "read mac vlan switch parameter fail, ret = %d\n", ret);
7552 return ret;
7553 }
7554
7555 /* modify and write new config parameter */
7556 hclge_comm_cmd_reuse_desc(&desc, false);
7557 req->switch_param = (req->switch_param & param_mask) | switch_param;
7558 req->param_mask = param_mask;
7559
7560 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7561 if (ret)
7562 dev_err(&hdev->pdev->dev,
7563 "set mac vlan switch parameter fail, ret = %d\n", ret);
7564 return ret;
7565 }
7566
7567 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7568 int link_ret)
7569 {
7570 #define HCLGE_PHY_LINK_STATUS_NUM 200
7571
7572 struct phy_device *phydev = hdev->hw.mac.phydev;
7573 int i = 0;
7574 int ret;
7575
7576 do {
7577 ret = phy_read_status(phydev);
7578 if (ret) {
7579 dev_err(&hdev->pdev->dev,
7580 "phy update link status fail, ret = %d\n", ret);
7581 return;
7582 }
7583
7584 if (phydev->link == link_ret)
7585 break;
7586
7587 msleep(HCLGE_LINK_STATUS_MS);
7588 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7589 }
7590
7591 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
7592 int wait_cnt)
7593 {
7594 int link_status;
7595 int i = 0;
7596 int ret;
7597
7598 do {
7599 ret = hclge_get_mac_link_status(hdev, &link_status);
7600 if (ret)
7601 return ret;
7602 if (link_status == link_ret)
7603 return 0;
7604
7605 msleep(HCLGE_LINK_STATUS_MS);
7606 } while (++i < wait_cnt);
7607 return -EBUSY;
7608 }
7609
7610 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7611 bool is_phy)
7612 {
7613 #define HCLGE_MAC_LINK_STATUS_NUM 100
7614
7615 int link_ret;
7616
7617 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7618
7619 if (is_phy)
7620 hclge_phy_link_status_wait(hdev, link_ret);
7621
7622 return hclge_mac_link_status_wait(hdev, link_ret,
7623 HCLGE_MAC_LINK_STATUS_NUM);
7624 }
7625
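/* App loopback is configured by a read-modify-write of the MAC mode
 * config: read the current config, update the loopback bit, then write
 * the config back.
 */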
7626 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7627 {
7628 struct hclge_config_mac_mode_cmd *req;
7629 struct hclge_desc desc;
7630 u32 loop_en;
7631 int ret;
7632
7633 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7634 /* 1 Read out the MAC mode config at first */
7635 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7636 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7637 if (ret) {
7638 dev_err(&hdev->pdev->dev,
7639 "mac loopback get fail, ret =%d.\n", ret);
7640 return ret;
7641 }
7642
7643 /* 2 Then setup the loopback flag */
7644 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7645 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7646
7647 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7648
7649 /* 3 Config mac work mode with the loopback flag
7650 * and its original configuration parameters
7651 */
7652 hclge_comm_cmd_reuse_desc(&desc, false);
7653 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7654 if (ret)
7655 dev_err(&hdev->pdev->dev,
7656 "mac loopback set fail, ret =%d.\n", ret);
7657 return ret;
7658 }
7659
7660 static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
7661 enum hnae3_loop loop_mode)
7662 {
7663 struct hclge_common_lb_cmd *req;
7664 struct hclge_desc desc;
7665 u8 loop_mode_b;
7666 int ret;
7667
7668 req = (struct hclge_common_lb_cmd *)desc.data;
7669 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7670
7671 switch (loop_mode) {
7672 case HNAE3_LOOP_SERIAL_SERDES:
7673 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7674 break;
7675 case HNAE3_LOOP_PARALLEL_SERDES:
7676 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7677 break;
7678 case HNAE3_LOOP_PHY:
7679 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7680 break;
7681 default:
7682 dev_err(&hdev->pdev->dev,
7683 "unsupported loopback mode %d\n", loop_mode);
7684 return -ENOTSUPP;
7685 }
7686
7687 req->mask = loop_mode_b;
7688 if (en)
7689 req->enable = loop_mode_b;
7690
7691 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7692 if (ret)
7693 dev_err(&hdev->pdev->dev,
7694 "failed to send loopback cmd, loop_mode = %d, ret = %d\n",
7695 loop_mode, ret);
7696
7697 return ret;
7698 }
7699
7700 static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
7701 {
7702 #define HCLGE_COMMON_LB_RETRY_MS 10
7703 #define HCLGE_COMMON_LB_RETRY_NUM 100
7704
7705 struct hclge_common_lb_cmd *req;
7706 struct hclge_desc desc;
7707 u32 i = 0;
7708 int ret;
7709
7710 req = (struct hclge_common_lb_cmd *)desc.data;
7711
7712 do {
7713 msleep(HCLGE_COMMON_LB_RETRY_MS);
7714 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7715 true);
7716 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7717 if (ret) {
7718 dev_err(&hdev->pdev->dev,
7719 "failed to get loopback done status, ret = %d\n",
7720 ret);
7721 return ret;
7722 }
7723 } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7724 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7725
7726 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7727 dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
7728 return -EBUSY;
7729 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7730 dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
7731 return -EIO;
7732 }
7733
7734 return 0;
7735 }
7736
7737 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7738 enum hnae3_loop loop_mode)
7739 {
7740 int ret;
7741
7742 ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
7743 if (ret)
7744 return ret;
7745
7746 return hclge_cfg_common_loopback_wait(hdev);
7747 }
7748
7749 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7750 enum hnae3_loop loop_mode)
7751 {
7752 int ret;
7753
7754 ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7755 if (ret)
7756 return ret;
7757
7758 hclge_cfg_mac_mode(hdev, en);
7759
7760 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7761 if (ret)
7762 dev_err(&hdev->pdev->dev,
7763 "serdes loopback config mac mode timeout\n");
7764
7765 return ret;
7766 }
7767
7768 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7769 struct phy_device *phydev)
7770 {
7771 int ret;
7772
7773 if (!phydev->suspended) {
7774 ret = phy_suspend(phydev);
7775 if (ret)
7776 return ret;
7777 }
7778
7779 ret = phy_resume(phydev);
7780 if (ret)
7781 return ret;
7782
7783 return phy_loopback(phydev, true);
7784 }
7785
7786 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7787 struct phy_device *phydev)
7788 {
7789 int ret;
7790
7791 ret = phy_loopback(phydev, false);
7792 if (ret)
7793 return ret;
7794
7795 return phy_suspend(phydev);
7796 }
7797
7798 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7799 {
7800 struct phy_device *phydev = hdev->hw.mac.phydev;
7801 int ret;
7802
7803 if (!phydev) {
7804 if (hnae3_dev_phy_imp_supported(hdev))
7805 return hclge_set_common_loopback(hdev, en,
7806 HNAE3_LOOP_PHY);
7807 return -ENOTSUPP;
7808 }
7809
7810 if (en)
7811 ret = hclge_enable_phy_loopback(hdev, phydev);
7812 else
7813 ret = hclge_disable_phy_loopback(hdev, phydev);
7814 if (ret) {
7815 dev_err(&hdev->pdev->dev,
7816 "set phy loopback fail, ret = %d\n", ret);
7817 return ret;
7818 }
7819
7820 hclge_cfg_mac_mode(hdev, en);
7821
7822 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7823 if (ret)
7824 dev_err(&hdev->pdev->dev,
7825 "phy loopback config mac mode timeout\n");
7826
7827 return ret;
7828 }
7829
7830 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7831 u16 stream_id, bool enable)
7832 {
7833 struct hclge_desc desc;
7834 struct hclge_cfg_com_tqp_queue_cmd *req =
7835 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7836
7837 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7838 req->tqp_id = cpu_to_le16(tqp_id);
7839 req->stream_id = cpu_to_le16(stream_id);
7840 if (enable)
7841 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7842
7843 return hclge_cmd_send(&hdev->hw, &desc, 1);
7844 }
7845
7846 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7847 {
7848 struct hclge_vport *vport = hclge_get_vport(handle);
7849 struct hclge_dev *hdev = vport->back;
7850 int ret;
7851 u16 i;
7852
7853 for (i = 0; i < handle->kinfo.num_tqps; i++) {
7854 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7855 if (ret)
7856 return ret;
7857 }
7858 return 0;
7859 }
7860
7861 static int hclge_set_loopback(struct hnae3_handle *handle,
7862 enum hnae3_loop loop_mode, bool en)
7863 {
7864 struct hclge_vport *vport = hclge_get_vport(handle);
7865 struct hclge_dev *hdev = vport->back;
7866 int ret = 0;
7867
7868 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7869 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7870 * the same, the packets are looped back in the SSU. If SSU loopback
7871 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7872 */
7873 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7874 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7875
7876 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7877 HCLGE_SWITCH_ALW_LPBK_MASK);
7878 if (ret)
7879 return ret;
7880 }
7881
7882 switch (loop_mode) {
7883 case HNAE3_LOOP_APP:
7884 ret = hclge_set_app_loopback(hdev, en);
7885 break;
7886 case HNAE3_LOOP_SERIAL_SERDES:
7887 case HNAE3_LOOP_PARALLEL_SERDES:
7888 ret = hclge_set_common_loopback(hdev, en, loop_mode);
7889 break;
7890 case HNAE3_LOOP_PHY:
7891 ret = hclge_set_phy_loopback(hdev, en);
7892 break;
7893 case HNAE3_LOOP_EXTERNAL:
7894 break;
7895 default:
7896 ret = -ENOTSUPP;
7897 dev_err(&hdev->pdev->dev,
7898 "loop_mode %d is not supported\n", loop_mode);
7899 break;
7900 }
7901
7902 if (ret)
7903 return ret;
7904
7905 ret = hclge_tqp_enable(handle, en);
7906 if (ret)
7907 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
7908 en ? "enable" : "disable", ret);
7909
7910 return ret;
7911 }
7912
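/* Make sure every loopback mode (app, serial serdes and parallel
 * serdes) is disabled by default.
 */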
7913 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7914 {
7915 int ret;
7916
7917 ret = hclge_set_app_loopback(hdev, false);
7918 if (ret)
7919 return ret;
7920
7921 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7922 if (ret)
7923 return ret;
7924
7925 return hclge_cfg_common_loopback(hdev, false,
7926 HNAE3_LOOP_PARALLEL_SERDES);
7927 }
7928
7929 static void hclge_flush_link_update(struct hclge_dev *hdev)
7930 {
7931 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
7932
7933 unsigned long last = hdev->serv_processed_cnt;
7934 int i = 0;
7935
7936 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7937 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7938 last == hdev->serv_processed_cnt)
7939 usleep_range(1, 1);
7940 }
7941
7942 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7943 {
7944 struct hclge_vport *vport = hclge_get_vport(handle);
7945 struct hclge_dev *hdev = vport->back;
7946
7947 if (enable) {
7948 hclge_task_schedule(hdev, 0);
7949 } else {
7950 /* Set the DOWN flag here to disable link updating */
7951 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7952
7953 smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
7954 hclge_flush_link_update(hdev);
7955 }
7956 }
7957
7958 static int hclge_ae_start(struct hnae3_handle *handle)
7959 {
7960 struct hclge_vport *vport = hclge_get_vport(handle);
7961 struct hclge_dev *hdev = vport->back;
7962
7963 /* mac enable */
7964 hclge_cfg_mac_mode(hdev, true);
7965 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7966 hdev->hw.mac.link = 0;
7967
7968 /* reset tqp stats */
7969 hclge_comm_reset_tqp_stats(handle);
7970
7971 hclge_mac_start_phy(hdev);
7972
7973 return 0;
7974 }
7975
7976 static void hclge_ae_stop(struct hnae3_handle *handle)
7977 {
7978 struct hclge_vport *vport = hclge_get_vport(handle);
7979 struct hclge_dev *hdev = vport->back;
7980
7981 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7982 spin_lock_bh(&hdev->fd_rule_lock);
7983 hclge_clear_arfs_rules(hdev);
7984 spin_unlock_bh(&hdev->fd_rule_lock);
7985
7986 /* If it is not a PF reset or FLR, the firmware will disable the MAC,
7987 * so it only needs to stop the PHY here.
7988 */
7989 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
7990 hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
7991 HCLGE_PFC_DISABLE);
7992 if (hdev->reset_type != HNAE3_FUNC_RESET &&
7993 hdev->reset_type != HNAE3_FLR_RESET) {
7994 hclge_mac_stop_phy(hdev);
7995 hclge_update_link_status(hdev);
7996 return;
7997 }
7998 }
7999
8000 hclge_reset_tqp(handle);
8001
8002 hclge_config_mac_tnl_int(hdev, false);
8003
8004 /* Mac disable */
8005 hclge_cfg_mac_mode(hdev, false);
8006
8007 hclge_mac_stop_phy(hdev);
8008
8009 /* reset tqp stats */
8010 hclge_comm_reset_tqp_stats(handle);
8011 hclge_update_link_status(hdev);
8012 }
8013
8014 int hclge_vport_start(struct hclge_vport *vport)
8015 {
8016 struct hclge_dev *hdev = vport->back;
8017
8018 set_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
8019 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8020 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8021 vport->last_active_jiffies = jiffies;
8022 vport->need_notify = 0;
8023
8024 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8025 if (vport->vport_id) {
8026 hclge_restore_mac_table_common(vport);
8027 hclge_restore_vport_vlan_table(vport);
8028 } else {
8029 hclge_restore_hw_table(hdev);
8030 }
8031 }
8032
8033 clear_bit(vport->vport_id, hdev->vport_config_block);
8034
8035 return 0;
8036 }
8037
8038 void hclge_vport_stop(struct hclge_vport *vport)
8039 {
8040 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
8041 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8042 vport->need_notify = 0;
8043 }
8044
8045 static int hclge_client_start(struct hnae3_handle *handle)
8046 {
8047 struct hclge_vport *vport = hclge_get_vport(handle);
8048
8049 return hclge_vport_start(vport);
8050 }
8051
8052 static void hclge_client_stop(struct hnae3_handle *handle)
8053 {
8054 struct hclge_vport *vport = hclge_get_vport(handle);
8055
8056 hclge_vport_stop(vport);
8057 }
8058
8059 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8060 u16 cmdq_resp, u8 resp_code,
8061 enum hclge_mac_vlan_tbl_opcode op)
8062 {
8063 struct hclge_dev *hdev = vport->back;
8064
8065 if (cmdq_resp) {
8066 dev_err(&hdev->pdev->dev,
8067 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8068 cmdq_resp);
8069 return -EIO;
8070 }
8071
8072 if (op == HCLGE_MAC_VLAN_ADD) {
8073 if (!resp_code || resp_code == 1)
8074 return 0;
8075 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8076 resp_code == HCLGE_ADD_MC_OVERFLOW)
8077 return -ENOSPC;
8078
8079 dev_err(&hdev->pdev->dev,
8080 "add mac addr failed for undefined, code=%u.\n",
8081 resp_code);
8082 return -EIO;
8083 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8084 if (!resp_code) {
8085 return 0;
8086 } else if (resp_code == 1) {
8087 dev_dbg(&hdev->pdev->dev,
8088 "remove mac addr failed for miss.\n");
8089 return -ENOENT;
8090 }
8091
8092 dev_err(&hdev->pdev->dev,
8093 "remove mac addr failed for undefined, code=%u.\n",
8094 resp_code);
8095 return -EIO;
8096 } else if (op == HCLGE_MAC_VLAN_LKUP) {
8097 if (!resp_code) {
8098 return 0;
8099 } else if (resp_code == 1) {
8100 dev_dbg(&hdev->pdev->dev,
8101 "lookup mac addr failed for miss.\n");
8102 return -ENOENT;
8103 }
8104
8105 dev_err(&hdev->pdev->dev,
8106 "lookup mac addr failed for undefined, code=%u.\n",
8107 resp_code);
8108 return -EIO;
8109 }
8110
8111 dev_err(&hdev->pdev->dev,
8112 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8113
8114 return -EINVAL;
8115 }
8116
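/* The per-VF bitmap spans two descriptors: VFs 0-191 live in desc[1]
 * and VFs 192-255 in desc[2], 32 functions per 32-bit data word.
 */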
8117 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8118 {
8119 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8120
8121 unsigned int word_num;
8122 unsigned int bit_num;
8123
8124 if (vfid > 255 || vfid < 0)
8125 return -EIO;
8126
8127 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8128 word_num = vfid / 32;
8129 bit_num = vfid % 32;
8130 if (clr)
8131 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8132 else
8133 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8134 } else {
8135 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8136 bit_num = vfid % 32;
8137 if (clr)
8138 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8139 else
8140 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8141 }
8142
8143 return 0;
8144 }
8145
8146 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8147 {
8148 #define HCLGE_DESC_NUMBER 3
8149 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8150 int i, j;
8151
8152 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8153 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8154 if (desc[i].data[j])
8155 return false;
8156
8157 return true;
8158 }
8159
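/* Pack the 6-byte MAC address into the table entry: bytes 0-3 go into
 * mac_addr_hi32 (little-endian) and bytes 4-5 into mac_addr_lo16.
 * Multicast entries additionally set the MC entry type bits.
 */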
8160 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8161 const u8 *addr, bool is_mc)
8162 {
8163 const unsigned char *mac_addr = addr;
8164 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8165 (mac_addr[0]) | (mac_addr[1] << 8);
8166 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
8167
8168 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8169 if (is_mc) {
8170 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8171 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8172 }
8173
8174 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8175 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8176 }
8177
8178 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8179 struct hclge_mac_vlan_tbl_entry_cmd *req)
8180 {
8181 struct hclge_dev *hdev = vport->back;
8182 struct hclge_desc desc;
8183 u8 resp_code;
8184 u16 retval;
8185 int ret;
8186
8187 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8188
8189 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8190
8191 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8192 if (ret) {
8193 dev_err(&hdev->pdev->dev,
8194 "del mac addr failed for cmd_send, ret =%d.\n",
8195 ret);
8196 return ret;
8197 }
8198 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8199 retval = le16_to_cpu(desc.retval);
8200
8201 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8202 HCLGE_MAC_VLAN_REMOVE);
8203 }
8204
8205 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8206 struct hclge_mac_vlan_tbl_entry_cmd *req,
8207 struct hclge_desc *desc,
8208 bool is_mc)
8209 {
8210 struct hclge_dev *hdev = vport->back;
8211 u8 resp_code;
8212 u16 retval;
8213 int ret;
8214
8215 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8216 if (is_mc) {
8217 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8218 memcpy(desc[0].data,
8219 req,
8220 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8221 hclge_cmd_setup_basic_desc(&desc[1],
8222 HCLGE_OPC_MAC_VLAN_ADD,
8223 true);
8224 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8225 hclge_cmd_setup_basic_desc(&desc[2],
8226 HCLGE_OPC_MAC_VLAN_ADD,
8227 true);
8228 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8229 } else {
8230 memcpy(desc[0].data,
8231 req,
8232 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8233 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8234 }
8235 if (ret) {
8236 dev_err(&hdev->pdev->dev,
8237 "lookup mac addr failed for cmd_send, ret =%d.\n",
8238 ret);
8239 return ret;
8240 }
8241 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8242 retval = le16_to_cpu(desc[0].retval);
8243
8244 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8245 HCLGE_MAC_VLAN_LKUP);
8246 }
8247
8248 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8249 struct hclge_mac_vlan_tbl_entry_cmd *req,
8250 struct hclge_desc *mc_desc)
8251 {
8252 struct hclge_dev *hdev = vport->back;
8253 int cfg_status;
8254 u8 resp_code;
8255 u16 retval;
8256 int ret;
8257
8258 if (!mc_desc) {
8259 struct hclge_desc desc;
8260
8261 hclge_cmd_setup_basic_desc(&desc,
8262 HCLGE_OPC_MAC_VLAN_ADD,
8263 false);
8264 memcpy(desc.data, req,
8265 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8266 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8267 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8268 retval = le16_to_cpu(desc.retval);
8269
8270 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8271 resp_code,
8272 HCLGE_MAC_VLAN_ADD);
8273 } else {
8274 hclge_comm_cmd_reuse_desc(&mc_desc[0], false);
8275 mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8276 hclge_comm_cmd_reuse_desc(&mc_desc[1], false);
8277 mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8278 hclge_comm_cmd_reuse_desc(&mc_desc[2], false);
8279 mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT);
8280 memcpy(mc_desc[0].data, req,
8281 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8282 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8283 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8284 retval = le16_to_cpu(mc_desc[0].retval);
8285
8286 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8287 resp_code,
8288 HCLGE_MAC_VLAN_ADD);
8289 }
8290
8291 if (ret) {
8292 dev_err(&hdev->pdev->dev,
8293 "add mac addr failed for cmd_send, ret =%d.\n",
8294 ret);
8295 return ret;
8296 }
8297
8298 return cfg_status;
8299 }
8300
8301 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8302 u16 *allocated_size)
8303 {
8304 struct hclge_umv_spc_alc_cmd *req;
8305 struct hclge_desc desc;
8306 int ret;
8307
8308 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8309 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8310
8311 req->space_size = cpu_to_le32(space_size);
8312
8313 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8314 if (ret) {
8315 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8316 ret);
8317 return ret;
8318 }
8319
8320 *allocated_size = le32_to_cpu(desc.data[1]);
8321
8322 return 0;
8323 }
8324
8325 static int hclge_init_umv_space(struct hclge_dev *hdev)
8326 {
8327 u16 allocated_size = 0;
8328 int ret;
8329
8330 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8331 if (ret)
8332 return ret;
8333
8334 if (allocated_size < hdev->wanted_umv_size)
8335 dev_warn(&hdev->pdev->dev,
8336 "failed to alloc umv space, want %u, get %u\n",
8337 hdev->wanted_umv_size, allocated_size);
8338
8339 hdev->max_umv_size = allocated_size;
8340 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8341 hdev->share_umv_size = hdev->priv_umv_size +
8342 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8343
8344 if (hdev->ae_dev->dev_specs.mc_mac_size)
8345 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
8346
8347 return 0;
8348 }
8349
8350 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8351 {
8352 struct hclge_vport *vport;
8353 int i;
8354
8355 for (i = 0; i < hdev->num_alloc_vport; i++) {
8356 vport = &hdev->vport[i];
8357 vport->used_umv_num = 0;
8358 }
8359
8360 mutex_lock(&hdev->vport_lock);
8361 hdev->share_umv_size = hdev->priv_umv_size +
8362 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8363 mutex_unlock(&hdev->vport_lock);
8364
8365 hdev->used_mc_mac_num = 0;
8366 }
8367
8368 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8369 {
8370 struct hclge_dev *hdev = vport->back;
8371 bool is_full;
8372
8373 if (need_lock)
8374 mutex_lock(&hdev->vport_lock);
8375
8376 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8377 hdev->share_umv_size == 0);
8378
8379 if (need_lock)
8380 mutex_unlock(&hdev->vport_lock);
8381
8382 return is_full;
8383 }
8384
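/* Account unicast MAC table usage: a vport consumes its private quota
 * first and then the shared pool; freeing reverses the accounting.
 */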
8385 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8386 {
8387 struct hclge_dev *hdev = vport->back;
8388
8389 if (is_free) {
8390 if (vport->used_umv_num > hdev->priv_umv_size)
8391 hdev->share_umv_size++;
8392
8393 if (vport->used_umv_num > 0)
8394 vport->used_umv_num--;
8395 } else {
8396 if (vport->used_umv_num >= hdev->priv_umv_size &&
8397 hdev->share_umv_size > 0)
8398 hdev->share_umv_size--;
8399 vport->used_umv_num++;
8400 }
8401 }
8402
8403 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8404 const u8 *mac_addr)
8405 {
8406 struct hclge_mac_node *mac_node, *tmp;
8407
8408 list_for_each_entry_safe(mac_node, tmp, list, node)
8409 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8410 return mac_node;
8411
8412 return NULL;
8413 }
8414
8415 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8416 enum HCLGE_MAC_NODE_STATE state)
8417 {
8418 switch (state) {
8419 /* from set_rx_mode or tmp_add_list */
8420 case HCLGE_MAC_TO_ADD:
8421 if (mac_node->state == HCLGE_MAC_TO_DEL)
8422 mac_node->state = HCLGE_MAC_ACTIVE;
8423 break;
8424 /* only from set_rx_mode */
8425 case HCLGE_MAC_TO_DEL:
8426 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8427 list_del(&mac_node->node);
8428 kfree(mac_node);
8429 } else {
8430 mac_node->state = HCLGE_MAC_TO_DEL;
8431 }
8432 break;
8433 /* only from tmp_add_list; in this case the mac_node->state won't be
8434 * ACTIVE.
8435 */
8436 case HCLGE_MAC_ACTIVE:
8437 if (mac_node->state == HCLGE_MAC_TO_ADD)
8438 mac_node->state = HCLGE_MAC_ACTIVE;
8439
8440 break;
8441 }
8442 }
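/* Summary of the resulting state machine (derived from the switch above,
 * rows are the node's current state, columns the requested state):
 *
 *   current \ request | TO_ADD  | TO_DEL     | ACTIVE
 *   ------------------+---------+------------+--------
 *   TO_ADD            | TO_ADD  | node freed | ACTIVE
 *   TO_DEL            | ACTIVE  | TO_DEL     | TO_DEL
 *   ACTIVE            | ACTIVE  | TO_DEL     | ACTIVE
 *
 * TO_ADD + TO_DEL frees the node because the address never reached the
 * hardware; TO_DEL + TO_ADD goes straight to ACTIVE because the entry
 * is still programmed in hardware.
 */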
8443
8444 int hclge_update_mac_list(struct hclge_vport *vport,
8445 enum HCLGE_MAC_NODE_STATE state,
8446 enum HCLGE_MAC_ADDR_TYPE mac_type,
8447 const unsigned char *addr)
8448 {
8449 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8450 struct hclge_dev *hdev = vport->back;
8451 struct hclge_mac_node *mac_node;
8452 struct list_head *list;
8453
8454 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8455 &vport->uc_mac_list : &vport->mc_mac_list;
8456
8457 spin_lock_bh(&vport->mac_list_lock);
8458
8459 /* if the mac addr is already in the mac list, there is no need to add
8460 * a new one; just check the mac addr state and either convert it to a
8461 * new state, remove it, or do nothing.
8462 */
8463 mac_node = hclge_find_mac_node(list, addr);
8464 if (mac_node) {
8465 hclge_update_mac_node(mac_node, state);
8466 spin_unlock_bh(&vport->mac_list_lock);
8467 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8468 return 0;
8469 }
8470
8471 /* if this address was never added, there is nothing to delete */
8472 if (state == HCLGE_MAC_TO_DEL) {
8473 spin_unlock_bh(&vport->mac_list_lock);
8474 hnae3_format_mac_addr(format_mac_addr, addr);
8475 dev_err(&hdev->pdev->dev,
8476 "failed to delete address %s from mac list\n",
8477 format_mac_addr);
8478 return -ENOENT;
8479 }
8480
8481 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8482 if (!mac_node) {
8483 spin_unlock_bh(&vport->mac_list_lock);
8484 return -ENOMEM;
8485 }
8486
8487 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8488
8489 mac_node->state = state;
8490 ether_addr_copy(mac_node->mac_addr, addr);
8491 list_add_tail(&mac_node->node, list);
8492
8493 spin_unlock_bh(&vport->mac_list_lock);
8494
8495 return 0;
8496 }
8497
8498 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8499 const unsigned char *addr)
8500 {
8501 struct hclge_vport *vport = hclge_get_vport(handle);
8502
8503 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8504 addr);
8505 }
8506
8507 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8508 const unsigned char *addr)
8509 {
8510 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8511 struct hclge_dev *hdev = vport->back;
8512 struct hclge_mac_vlan_tbl_entry_cmd req;
8513 struct hclge_desc desc;
8514 u16 egress_port = 0;
8515 int ret;
8516
8517 /* mac addr check */
8518 if (is_zero_ether_addr(addr) ||
8519 is_broadcast_ether_addr(addr) ||
8520 is_multicast_ether_addr(addr)) {
8521 hnae3_format_mac_addr(format_mac_addr, addr);
8522 dev_err(&hdev->pdev->dev,
8523 "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
8524 format_mac_addr, is_zero_ether_addr(addr),
8525 is_broadcast_ether_addr(addr),
8526 is_multicast_ether_addr(addr));
8527 return -EINVAL;
8528 }
8529
8530 memset(&req, 0, sizeof(req));
8531
8532 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8533 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8534
8535 req.egress_port = cpu_to_le16(egress_port);
8536
8537 hclge_prepare_mac_addr(&req, addr, false);
8538
8539 /* Lookup the mac address in the mac_vlan table, and add
8540 * it if the entry does not exist. Duplicate unicast entries
8541 * are not allowed in the mac vlan table.
8542 */
8543 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8544 if (ret == -ENOENT) {
8545 mutex_lock(&hdev->vport_lock);
8546 if (!hclge_is_umv_space_full(vport, false)) {
8547 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8548 if (!ret)
8549 hclge_update_umv_space(vport, false);
8550 mutex_unlock(&hdev->vport_lock);
8551 return ret;
8552 }
8553 mutex_unlock(&hdev->vport_lock);
8554
8555 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8556 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8557 hdev->priv_umv_size);
8558
8559 return -ENOSPC;
8560 }
8561
8562 /* check if we just hit the duplicate */
8563 if (!ret)
8564 return -EEXIST;
8565
8566 return ret;
8567 }
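/* Return-value contract of the add path above, as a hedged summary for
 * callers (derived from the code itself, not separate documentation):
 *
 *   0        entry was missing and has been programmed; UMV quota charged
 *   -EEXIST  lookup succeeded, the address is already in hardware
 *   -ENOSPC  both the private quota and the shared UMV pool are exhausted
 *   other    raw lookup/firmware error, passed through unchanged
 *
 * hclge_sync_vport_mac_list() below relies on exactly this distinction
 * to decide whether to keep walking the pending list.
 */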
8568
8569 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8570 const unsigned char *addr)
8571 {
8572 struct hclge_vport *vport = hclge_get_vport(handle);
8573
8574 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8575 addr);
8576 }
8577
8578 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8579 const unsigned char *addr)
8580 {
8581 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8582 struct hclge_dev *hdev = vport->back;
8583 struct hclge_mac_vlan_tbl_entry_cmd req;
8584 int ret;
8585
8586 /* mac addr check */
8587 if (is_zero_ether_addr(addr) ||
8588 is_broadcast_ether_addr(addr) ||
8589 is_multicast_ether_addr(addr)) {
8590 hnae3_format_mac_addr(format_mac_addr, addr);
8591 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
8592 format_mac_addr);
8593 return -EINVAL;
8594 }
8595
8596 memset(&req, 0, sizeof(req));
8597 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8598 hclge_prepare_mac_addr(&req, addr, false);
8599 ret = hclge_remove_mac_vlan_tbl(vport, &req);
8600 if (!ret || ret == -ENOENT) {
8601 mutex_lock(&hdev->vport_lock);
8602 hclge_update_umv_space(vport, true);
8603 mutex_unlock(&hdev->vport_lock);
8604 return 0;
8605 }
8606
8607 return ret;
8608 }
8609
8610 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8611 const unsigned char *addr)
8612 {
8613 struct hclge_vport *vport = hclge_get_vport(handle);
8614
8615 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8616 addr);
8617 }
8618
8619 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8620 const unsigned char *addr)
8621 {
8622 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8623 struct hclge_dev *hdev = vport->back;
8624 struct hclge_mac_vlan_tbl_entry_cmd req;
8625 struct hclge_desc desc[3];
8626 bool is_new_addr = false;
8627 int status;
8628
8629 /* mac addr check */
8630 if (!is_multicast_ether_addr(addr)) {
8631 hnae3_format_mac_addr(format_mac_addr, addr);
8632 dev_err(&hdev->pdev->dev,
8633 "Add mc mac err! invalid mac:%s.\n",
8634 format_mac_addr);
8635 return -EINVAL;
8636 }
8637 memset(&req, 0, sizeof(req));
8638 hclge_prepare_mac_addr(&req, addr, true);
8639 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8640 if (status) {
8641 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
8642 hdev->used_mc_mac_num >=
8643 hdev->ae_dev->dev_specs.mc_mac_size)
8644 goto err_no_space;
8645
8646 is_new_addr = true;
8647
8648 /* This mac addr does not exist, add a new entry for it */
8649 memset(desc[0].data, 0, sizeof(desc[0].data));
8650 memset(desc[1].data, 0, sizeof(desc[0].data));
8651 memset(desc[2].data, 0, sizeof(desc[0].data));
8652 }
8653 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8654 if (status)
8655 return status;
8656 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8657 if (status == -ENOSPC)
8658 goto err_no_space;
8659 else if (!status && is_new_addr)
8660 hdev->used_mc_mac_num++;
8661
8662 return status;
8663
8664 err_no_space:
8665 /* if already overflow, not to print each time */
8666 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) {
8667 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8668 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8669 }
8670
8671 return -ENOSPC;
8672 }
8673
8674 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8675 const unsigned char *addr)
8676 {
8677 struct hclge_vport *vport = hclge_get_vport(handle);
8678
8679 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8680 addr);
8681 }
8682
8683 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8684 const unsigned char *addr)
8685 {
8686 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8687 struct hclge_dev *hdev = vport->back;
8688 struct hclge_mac_vlan_tbl_entry_cmd req;
8689 enum hclge_comm_cmd_status status;
8690 struct hclge_desc desc[3];
8691
8692 /* mac addr check */
8693 if (!is_multicast_ether_addr(addr)) {
8694 hnae3_format_mac_addr(format_mac_addr, addr);
8695 dev_dbg(&hdev->pdev->dev,
8696 "Remove mc mac err! invalid mac:%s.\n",
8697 format_mac_addr);
8698 return -EINVAL;
8699 }
8700
8701 memset(&req, 0, sizeof(req));
8702 hclge_prepare_mac_addr(&req, addr, true);
8703 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8704 if (!status) {
8705 /* This mac addr exists, remove this handle's VFID from it */
8706 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8707 if (status)
8708 return status;
8709
8710 if (hclge_is_all_function_id_zero(desc)) {
8711 /* All the vfids are zero, so this entry needs to be deleted */
8712 status = hclge_remove_mac_vlan_tbl(vport, &req);
8713 if (!status)
8714 hdev->used_mc_mac_num--;
8715 } else {
8716 /* Not all the vfids are zero, so just update the vfid */
8717 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8718 }
8719 } else if (status == -ENOENT) {
8720 status = 0;
8721 }
8722
8723 return status;
8724 }
8725
8726 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8727 struct list_head *list,
8728 enum HCLGE_MAC_ADDR_TYPE mac_type)
8729 {
8730 int (*sync)(struct hclge_vport *vport, const unsigned char *addr);
8731 struct hclge_mac_node *mac_node, *tmp;
8732 int ret;
8733
8734 if (mac_type == HCLGE_MAC_ADDR_UC)
8735 sync = hclge_add_uc_addr_common;
8736 else
8737 sync = hclge_add_mc_addr_common;
8738
8739 list_for_each_entry_safe(mac_node, tmp, list, node) {
8740 ret = sync(vport, mac_node->mac_addr);
8741 if (!ret) {
8742 mac_node->state = HCLGE_MAC_ACTIVE;
8743 } else {
8744 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8745 &vport->state);
8746
8747 /* If one unicast mac address already exists in hardware,
8748 * we still need to try whether the other unicast mac
8749 * addresses are new ones that can be added.
8750 * Multicast mac addresses can be reused, so even when
8751 * there is no space to add a new multicast mac address,
8752 * we should check whether the other mac addresses already
8753 * exist in hardware and can be reused.
8754 */
8755 if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) ||
8756 (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC))
8757 break;
8758 }
8759 }
8760 }
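/* A minimal sketch of the retry policy above: for unicast, -EEXIST is
 * benign (the entry is already programmed), so the walk continues; any
 * other error aborts this pass. For multicast, -ENOSPC is the tolerated
 * case, since an over-quota address may still match an entry another
 * function has already installed. Any abort re-arms MAC_TBL_CHANGE so
 * the service task retries on its next tick.
 */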
8761
8762 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8763 struct list_head *list,
8764 enum HCLGE_MAC_ADDR_TYPE mac_type)
8765 {
8766 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8767 struct hclge_mac_node *mac_node, *tmp;
8768 int ret;
8769
8770 if (mac_type == HCLGE_MAC_ADDR_UC)
8771 unsync = hclge_rm_uc_addr_common;
8772 else
8773 unsync = hclge_rm_mc_addr_common;
8774
8775 list_for_each_entry_safe(mac_node, tmp, list, node) {
8776 ret = unsync(vport, mac_node->mac_addr);
8777 if (!ret || ret == -ENOENT) {
8778 list_del(&mac_node->node);
8779 kfree(mac_node);
8780 } else {
8781 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8782 &vport->state);
8783 break;
8784 }
8785 }
8786 }
8787
8788 static bool hclge_sync_from_add_list(struct list_head *add_list,
8789 struct list_head *mac_list)
8790 {
8791 struct hclge_mac_node *mac_node, *tmp, *new_node;
8792 bool all_added = true;
8793
8794 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8795 if (mac_node->state == HCLGE_MAC_TO_ADD)
8796 all_added = false;
8797
8798 /* if the mac address from tmp_add_list is not in the
8799 * uc/mc_mac_list, it means a TO_DEL request was received
8800 * during the time window of adding the mac address into the
8801 * mac table. If the mac_node state is ACTIVE, change it to
8802 * TO_DEL so it will be removed next time; otherwise it must be
8803 * TO_ADD, meaning this address was never added into the mac
8804 * table, so just remove the mac node.
8805 */
8806 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8807 if (new_node) {
8808 hclge_update_mac_node(new_node, mac_node->state);
8809 list_del(&mac_node->node);
8810 kfree(mac_node);
8811 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8812 mac_node->state = HCLGE_MAC_TO_DEL;
8813 list_move_tail(&mac_node->node, mac_list);
8814 } else {
8815 list_del(&mac_node->node);
8816 kfree(mac_node);
8817 }
8818 }
8819
8820 return all_added;
8821 }
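/* Example of the race this merge resolves (hypothetical timeline):
 * CPU A copies addr X (TO_ADD) to tmp_add_list, drops the lock and
 * programs X into hardware, so its tmp node becomes ACTIVE. Meanwhile
 * set_rx_mode on CPU B deletes X from uc_mac_list entirely. On merge,
 * X is absent from mac_list but ACTIVE in tmp_add_list, so it is put
 * back as TO_DEL and the next sync pass removes it from hardware.
 */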
8822
8823 static void hclge_sync_from_del_list(struct list_head *del_list,
8824 struct list_head *mac_list)
8825 {
8826 struct hclge_mac_node *mac_node, *tmp, *new_node;
8827
8828 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8829 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8830 if (new_node) {
8831 /* If the mac addr exists in the mac list, it means a new
8832 * TO_ADD request was received during the time window of
8833 * configuring the mac address. Since the mac node state is
8834 * TO_ADD and the address is still in the hardware (because
8835 * the delete failed), we just need to change the mac node
8836 * state to ACTIVE.
8837 */
8838 new_node->state = HCLGE_MAC_ACTIVE;
8839 list_del(&mac_node->node);
8840 kfree(mac_node);
8841 } else {
8842 list_move_tail(&mac_node->node, mac_list);
8843 }
8844 }
8845 }
8846
8847 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8848 enum HCLGE_MAC_ADDR_TYPE mac_type,
8849 bool is_all_added)
8850 {
8851 if (mac_type == HCLGE_MAC_ADDR_UC) {
8852 if (is_all_added)
8853 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8854 else if (hclge_is_umv_space_full(vport, true))
8855 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8856 } else {
8857 if (is_all_added)
8858 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8859 else
8860 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8861 }
8862 }
8863
8864 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8865 enum HCLGE_MAC_ADDR_TYPE mac_type)
8866 {
8867 struct hclge_mac_node *mac_node, *tmp, *new_node;
8868 struct list_head tmp_add_list, tmp_del_list;
8869 struct list_head *list;
8870 bool all_added;
8871
8872 INIT_LIST_HEAD(&tmp_add_list);
8873 INIT_LIST_HEAD(&tmp_del_list);
8874
8875 /* move the mac addrs to the tmp_add_list and tmp_del_list, so
8876 * we can add/delete them outside the spin lock
8877 */
8878 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8879 &vport->uc_mac_list : &vport->mc_mac_list;
8880
8881 spin_lock_bh(&vport->mac_list_lock);
8882
8883 list_for_each_entry_safe(mac_node, tmp, list, node) {
8884 switch (mac_node->state) {
8885 case HCLGE_MAC_TO_DEL:
8886 list_move_tail(&mac_node->node, &tmp_del_list);
8887 break;
8888 case HCLGE_MAC_TO_ADD:
8889 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8890 if (!new_node)
8891 goto stop_traverse;
8892 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8893 new_node->state = mac_node->state;
8894 list_add_tail(&new_node->node, &tmp_add_list);
8895 break;
8896 default:
8897 break;
8898 }
8899 }
8900
8901 stop_traverse:
8902 spin_unlock_bh(&vport->mac_list_lock);
8903
8904 /* delete first, in order to get max mac table space for adding */
8905 hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
8906 hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type);
8907
8908 /* if some mac addresses failed to be added/deleted, move them back
8909 * to the mac_list and retry next time.
8910 */
8911 spin_lock_bh(&vport->mac_list_lock);
8912
8913 hclge_sync_from_del_list(&tmp_del_list, list);
8914 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8915
8916 spin_unlock_bh(&vport->mac_list_lock);
8917
8918 hclge_update_overflow_flags(vport, mac_type, all_added);
8919 }
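/* Shape of the two-phase sync above, in pseudo-steps (a summary of the
 * code, not additional behavior):
 *
 *   1. under mac_list_lock: move TO_DEL nodes to tmp_del_list and
 *      duplicate TO_ADD nodes into tmp_add_list;
 *   2. lock dropped: issue the (slow) firmware add/delete commands,
 *      deletions first so adds see the largest possible free space;
 *   3. under mac_list_lock again: merge both tmp lists back, turning
 *      any mid-window set_rx_mode changes into new TO_ADD/TO_DEL work.
 */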
8920
8921 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8922 {
8923 struct hclge_dev *hdev = vport->back;
8924
8925 if (test_bit(vport->vport_id, hdev->vport_config_block))
8926 return false;
8927
8928 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8929 return true;
8930
8931 return false;
8932 }
8933
8934 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8935 {
8936 int i;
8937
8938 for (i = 0; i < hdev->num_alloc_vport; i++) {
8939 struct hclge_vport *vport = &hdev->vport[i];
8940
8941 if (!hclge_need_sync_mac_table(vport))
8942 continue;
8943
8944 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8945 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8946 }
8947 }
8948
8949 static void hclge_build_del_list(struct list_head *list,
8950 bool is_del_list,
8951 struct list_head *tmp_del_list)
8952 {
8953 struct hclge_mac_node *mac_cfg, *tmp;
8954
8955 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8956 switch (mac_cfg->state) {
8957 case HCLGE_MAC_TO_DEL:
8958 case HCLGE_MAC_ACTIVE:
8959 list_move_tail(&mac_cfg->node, tmp_del_list);
8960 break;
8961 case HCLGE_MAC_TO_ADD:
8962 if (is_del_list) {
8963 list_del(&mac_cfg->node);
8964 kfree(mac_cfg);
8965 }
8966 break;
8967 }
8968 }
8969 }
8970
8971 static void hclge_unsync_del_list(struct hclge_vport *vport,
8972 int (*unsync)(struct hclge_vport *vport,
8973 const unsigned char *addr),
8974 bool is_del_list,
8975 struct list_head *tmp_del_list)
8976 {
8977 struct hclge_mac_node *mac_cfg, *tmp;
8978 int ret;
8979
8980 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8981 ret = unsync(vport, mac_cfg->mac_addr);
8982 if (!ret || ret == -ENOENT) {
8983 /* clear all mac addrs from hardware, but keep these
8984 * mac addrs in the mac list, and restore them after
8985 * the vf reset finishes.
8986 */
8987 if (!is_del_list &&
8988 mac_cfg->state == HCLGE_MAC_ACTIVE) {
8989 mac_cfg->state = HCLGE_MAC_TO_ADD;
8990 } else {
8991 list_del(&mac_cfg->node);
8992 kfree(mac_cfg);
8993 }
8994 } else if (is_del_list) {
8995 mac_cfg->state = HCLGE_MAC_TO_DEL;
8996 }
8997 }
8998 }
8999
9000 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9001 enum HCLGE_MAC_ADDR_TYPE mac_type)
9002 {
9003 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9004 struct hclge_dev *hdev = vport->back;
9005 struct list_head tmp_del_list, *list;
9006
9007 if (mac_type == HCLGE_MAC_ADDR_UC) {
9008 list = &vport->uc_mac_list;
9009 unsync = hclge_rm_uc_addr_common;
9010 } else {
9011 list = &vport->mc_mac_list;
9012 unsync = hclge_rm_mc_addr_common;
9013 }
9014
9015 INIT_LIST_HEAD(&tmp_del_list);
9016
9017 if (!is_del_list)
9018 set_bit(vport->vport_id, hdev->vport_config_block);
9019
9020 spin_lock_bh(&vport->mac_list_lock);
9021
9022 hclge_build_del_list(list, is_del_list, &tmp_del_list);
9023
9024 spin_unlock_bh(&vport->mac_list_lock);
9025
9026 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9027
9028 spin_lock_bh(&vport->mac_list_lock);
9029
9030 hclge_sync_from_del_list(&tmp_del_list, list);
9031
9032 spin_unlock_bh(&vport->mac_list_lock);
9033 }
9034
9035 /* remove all mac addresses when uninitializing */
9036 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9037 enum HCLGE_MAC_ADDR_TYPE mac_type)
9038 {
9039 struct hclge_mac_node *mac_node, *tmp;
9040 struct hclge_dev *hdev = vport->back;
9041 struct list_head tmp_del_list, *list;
9042
9043 INIT_LIST_HEAD(&tmp_del_list);
9044
9045 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9046 &vport->uc_mac_list : &vport->mc_mac_list;
9047
9048 spin_lock_bh(&vport->mac_list_lock);
9049
9050 list_for_each_entry_safe(mac_node, tmp, list, node) {
9051 switch (mac_node->state) {
9052 case HCLGE_MAC_TO_DEL:
9053 case HCLGE_MAC_ACTIVE:
9054 list_move_tail(&mac_node->node, &tmp_del_list);
9055 break;
9056 case HCLGE_MAC_TO_ADD:
9057 list_del(&mac_node->node);
9058 kfree(mac_node);
9059 break;
9060 }
9061 }
9062
9063 spin_unlock_bh(&vport->mac_list_lock);
9064
9065 hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
9066
9067 if (!list_empty(&tmp_del_list))
9068 dev_warn(&hdev->pdev->dev,
9069 "uninit %s mac list for vport %u not completely.\n",
9070 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9071 vport->vport_id);
9072
9073 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9074 list_del(&mac_node->node);
9075 kfree(mac_node);
9076 }
9077 }
9078
9079 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9080 {
9081 struct hclge_vport *vport;
9082 int i;
9083
9084 for (i = 0; i < hdev->num_alloc_vport; i++) {
9085 vport = &hdev->vport[i];
9086 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9087 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9088 }
9089 }
9090
9091 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9092 u16 cmdq_resp, u8 resp_code)
9093 {
9094 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
9095 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
9096 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
9097 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
9098
9099 int return_status;
9100
9101 if (cmdq_resp) {
9102 dev_err(&hdev->pdev->dev,
9103 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9104 cmdq_resp);
9105 return -EIO;
9106 }
9107
9108 switch (resp_code) {
9109 case HCLGE_ETHERTYPE_SUCCESS_ADD:
9110 case HCLGE_ETHERTYPE_ALREADY_ADD:
9111 return_status = 0;
9112 break;
9113 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9114 dev_err(&hdev->pdev->dev,
9115 "add mac ethertype failed for manager table overflow.\n");
9116 return_status = -EIO;
9117 break;
9118 case HCLGE_ETHERTYPE_KEY_CONFLICT:
9119 dev_err(&hdev->pdev->dev,
9120 "add mac ethertype failed for key conflict.\n");
9121 return_status = -EIO;
9122 break;
9123 default:
9124 dev_err(&hdev->pdev->dev,
9125 "add mac ethertype failed for undefined, code=%u.\n",
9126 resp_code);
9127 return_status = -EIO;
9128 }
9129
9130 return return_status;
9131 }
9132
9133 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9134 u8 *mac_addr)
9135 {
9136 struct hclge_vport *vport = hclge_get_vport(handle);
9137 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
9138 struct hclge_dev *hdev = vport->back;
9139
9140 vport = hclge_get_vf_vport(hdev, vf);
9141 if (!vport)
9142 return -EINVAL;
9143
9144 hnae3_format_mac_addr(format_mac_addr, mac_addr);
9145 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9146 dev_info(&hdev->pdev->dev,
9147 "Specified MAC(=%s) is same as before, no change committed!\n",
9148 format_mac_addr);
9149 return 0;
9150 }
9151
9152 ether_addr_copy(vport->vf_info.mac, mac_addr);
9153
9154 /* there is a time window before the PF knows the VF is not alive,
9155 * which may cause the mailbox send to fail, but it doesn't matter,
9156 * the VF will query it when reinitializing.
9157 */
9158 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9159 dev_info(&hdev->pdev->dev,
9160 "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
9161 vf, format_mac_addr);
9162 (void)hclge_inform_reset_assert_to_vf(vport);
9163 return 0;
9164 }
9165
9166 dev_info(&hdev->pdev->dev,
9167 "MAC of VF %d has been set to %s, will be active after VF reset\n",
9168 vf, format_mac_addr);
9169 return 0;
9170 }
9171
9172 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9173 const struct hclge_mac_mgr_tbl_entry_cmd *req)
9174 {
9175 struct hclge_desc desc;
9176 u8 resp_code;
9177 u16 retval;
9178 int ret;
9179
9180 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9181 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9182
9183 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9184 if (ret) {
9185 dev_err(&hdev->pdev->dev,
9186 "add mac ethertype failed for cmd_send, ret =%d.\n",
9187 ret);
9188 return ret;
9189 }
9190
9191 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9192 retval = le16_to_cpu(desc.retval);
9193
9194 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9195 }
9196
9197 static int init_mgr_tbl(struct hclge_dev *hdev)
9198 {
9199 int ret;
9200 int i;
9201
9202 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9203 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9204 if (ret) {
9205 dev_err(&hdev->pdev->dev,
9206 "add mac ethertype failed, ret =%d.\n",
9207 ret);
9208 return ret;
9209 }
9210 }
9211
9212 return 0;
9213 }
9214
9215 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9216 {
9217 struct hclge_vport *vport = hclge_get_vport(handle);
9218 struct hclge_dev *hdev = vport->back;
9219
9220 ether_addr_copy(p, hdev->hw.mac.mac_addr);
9221 }
9222
9223 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9224 const u8 *old_addr, const u8 *new_addr)
9225 {
9226 struct list_head *list = &vport->uc_mac_list;
9227 struct hclge_mac_node *old_node, *new_node;
9228
9229 new_node = hclge_find_mac_node(list, new_addr);
9230 if (!new_node) {
9231 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9232 if (!new_node)
9233 return -ENOMEM;
9234
9235 new_node->state = HCLGE_MAC_TO_ADD;
9236 ether_addr_copy(new_node->mac_addr, new_addr);
9237 list_add(&new_node->node, list);
9238 } else {
9239 if (new_node->state == HCLGE_MAC_TO_DEL)
9240 new_node->state = HCLGE_MAC_ACTIVE;
9241
9242 /* make sure the new addr is at the list head, to avoid the dev
9243 * addr not being re-added into the mac table due to the umv space
9244 * limitation after a global/imp reset, which clears the mac
9245 * table in hardware.
9246 */
9247 list_move(&new_node->node, list);
9248 }
9249
9250 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9251 old_node = hclge_find_mac_node(list, old_addr);
9252 if (old_node) {
9253 if (old_node->state == HCLGE_MAC_TO_ADD) {
9254 list_del(&old_node->node);
9255 kfree(old_node);
9256 } else {
9257 old_node->state = HCLGE_MAC_TO_DEL;
9258 }
9259 }
9260 }
9261
9262 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9263
9264 return 0;
9265 }
9266
9267 static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
9268 bool is_first)
9269 {
9270 const unsigned char *new_addr = (const unsigned char *)p;
9271 struct hclge_vport *vport = hclge_get_vport(handle);
9272 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
9273 struct hclge_dev *hdev = vport->back;
9274 unsigned char *old_addr = NULL;
9275 int ret;
9276
9277 /* mac addr check */
9278 if (is_zero_ether_addr(new_addr) ||
9279 is_broadcast_ether_addr(new_addr) ||
9280 is_multicast_ether_addr(new_addr)) {
9281 hnae3_format_mac_addr(format_mac_addr, new_addr);
9282 dev_err(&hdev->pdev->dev,
9283 "change uc mac err! invalid mac: %s.\n",
9284 format_mac_addr);
9285 return -EINVAL;
9286 }
9287
9288 ret = hclge_pause_addr_cfg(hdev, new_addr);
9289 if (ret) {
9290 dev_err(&hdev->pdev->dev,
9291 "failed to configure mac pause address, ret = %d\n",
9292 ret);
9293 return ret;
9294 }
9295
9296 if (!is_first)
9297 old_addr = hdev->hw.mac.mac_addr;
9298
9299 spin_lock_bh(&vport->mac_list_lock);
9300 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9301 if (ret) {
9302 hnae3_format_mac_addr(format_mac_addr, new_addr);
9303 dev_err(&hdev->pdev->dev,
9304 "failed to change the mac addr:%s, ret = %d\n",
9305 format_mac_addr, ret);
9306 spin_unlock_bh(&vport->mac_list_lock);
9307
9308 if (!is_first)
9309 hclge_pause_addr_cfg(hdev, old_addr);
9310
9311 return ret;
9312 }
9313 /* we must update the dev addr under spin lock protection, preventing
9314 * the dev addr from being removed by the set_rx_mode path.
9315 */
9316 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9317 spin_unlock_bh(&vport->mac_list_lock);
9318
9319 hclge_task_schedule(hdev, 0);
9320
9321 return 0;
9322 }
9323
9324 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9325 {
9326 struct mii_ioctl_data *data = if_mii(ifr);
9327
9328 if (!hnae3_dev_phy_imp_supported(hdev))
9329 return -EOPNOTSUPP;
9330
9331 switch (cmd) {
9332 case SIOCGMIIPHY:
9333 data->phy_id = hdev->hw.mac.phy_addr;
9334 /* this command reads phy id and register at the same time */
9335 fallthrough;
9336 case SIOCGMIIREG:
9337 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9338 return 0;
9339
9340 case SIOCSMIIREG:
9341 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9342 default:
9343 return -EOPNOTSUPP;
9344 }
9345 }
9346
9347 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9348 int cmd)
9349 {
9350 struct hclge_vport *vport = hclge_get_vport(handle);
9351 struct hclge_dev *hdev = vport->back;
9352
9353 switch (cmd) {
9354 case SIOCGHWTSTAMP:
9355 return hclge_ptp_get_cfg(hdev, ifr);
9356 case SIOCSHWTSTAMP:
9357 return hclge_ptp_set_cfg(hdev, ifr);
9358 default:
9359 if (!hdev->hw.mac.phydev)
9360 return hclge_mii_ioctl(hdev, ifr, cmd);
9361 }
9362
9363 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9364 }
9365
9366 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9367 bool bypass_en)
9368 {
9369 struct hclge_port_vlan_filter_bypass_cmd *req;
9370 struct hclge_desc desc;
9371 int ret;
9372
9373 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9374 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9375 req->vf_id = vf_id;
9376 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9377 bypass_en ? 1 : 0);
9378
9379 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9380 if (ret)
9381 dev_err(&hdev->pdev->dev,
9382 "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9383 vf_id, ret);
9384
9385 return ret;
9386 }
9387
9388 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9389 u8 fe_type, bool filter_en, u8 vf_id)
9390 {
9391 struct hclge_vlan_filter_ctrl_cmd *req;
9392 struct hclge_desc desc;
9393 int ret;
9394
9395 /* read current vlan filter parameter */
9396 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9397 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9398 req->vlan_type = vlan_type;
9399 req->vf_id = vf_id;
9400
9401 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9402 if (ret) {
9403 dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
9404 vf_id, ret);
9405 return ret;
9406 }
9407
9408 /* modify and write new config parameter */
9409 hclge_comm_cmd_reuse_desc(&desc, false);
9410 req->vlan_fe = filter_en ?
9411 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9412
9413 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9414 if (ret)
9415 dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
9416 vf_id, ret);
9417
9418 return ret;
9419 }
9420
9421 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9422 {
9423 struct hclge_dev *hdev = vport->back;
9424 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9425 int ret;
9426
9427 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9428 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9429 HCLGE_FILTER_FE_EGRESS_V1_B,
9430 enable, vport->vport_id);
9431
9432 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9433 HCLGE_FILTER_FE_EGRESS, enable,
9434 vport->vport_id);
9435 if (ret)
9436 return ret;
9437
9438 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9439 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9440 !enable);
9441 } else if (!vport->vport_id) {
9442 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9443 enable = false;
9444
9445 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9446 HCLGE_FILTER_FE_INGRESS,
9447 enable, 0);
9448 }
9449
9450 return ret;
9451 }
9452
9453 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9454 {
9455 struct hnae3_handle *handle = &vport->nic;
9456 struct hclge_vport_vlan_cfg *vlan, *tmp;
9457 struct hclge_dev *hdev = vport->back;
9458
9459 if (vport->vport_id) {
9460 if (vport->port_base_vlan_cfg.state !=
9461 HNAE3_PORT_BASE_VLAN_DISABLE)
9462 return true;
9463
9464 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9465 return false;
9466 } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9467 return false;
9468 }
9469
9470 if (!vport->req_vlan_fltr_en)
9471 return false;
9472
9473 /* for compatibility with older devices, always enable the vlan filter */
9474 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9475 return true;
9476
9477 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9478 if (vlan->vlan_id != 0)
9479 return true;
9480
9481 return false;
9482 }
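/* Decision summary for the helper above (derived from the code): a VF
 * with a port-based VLAN always filters; a trusted VF that requested
 * promiscuous unicast never does; the PF in user-level promiscuous mode
 * never does. Otherwise the requested state is honored, except that on
 * VLAN_FLTR_MDF-capable hardware the filter stays off until at least
 * one non-zero VLAN is actually in use.
 */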
9483
9484 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9485 {
9486 struct hclge_dev *hdev = vport->back;
9487 bool need_en;
9488 int ret;
9489
9490 mutex_lock(&hdev->vport_lock);
9491
9492 vport->req_vlan_fltr_en = request_en;
9493
9494 need_en = hclge_need_enable_vport_vlan_filter(vport);
9495 if (need_en == vport->cur_vlan_fltr_en) {
9496 mutex_unlock(&hdev->vport_lock);
9497 return 0;
9498 }
9499
9500 ret = hclge_set_vport_vlan_filter(vport, need_en);
9501 if (ret) {
9502 mutex_unlock(&hdev->vport_lock);
9503 return ret;
9504 }
9505
9506 vport->cur_vlan_fltr_en = need_en;
9507
9508 mutex_unlock(&hdev->vport_lock);
9509
9510 return 0;
9511 }
9512
9513 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9514 {
9515 struct hclge_vport *vport = hclge_get_vport(handle);
9516
9517 return hclge_enable_vport_vlan_filter(vport, enable);
9518 }
9519
9520 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9521 bool is_kill, u16 vlan,
9522 struct hclge_desc *desc)
9523 {
9524 struct hclge_vlan_filter_vf_cfg_cmd *req0;
9525 struct hclge_vlan_filter_vf_cfg_cmd *req1;
9526 u8 vf_byte_val;
9527 u8 vf_byte_off;
9528 int ret;
9529
9530 hclge_cmd_setup_basic_desc(&desc[0],
9531 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9532 hclge_cmd_setup_basic_desc(&desc[1],
9533 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9534
9535 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
9536
9537 vf_byte_off = vfid / 8;
9538 vf_byte_val = 1 << (vfid % 8);
9539
9540 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9541 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9542
9543 req0->vlan_id = cpu_to_le16(vlan);
9544 req0->vlan_cfg = is_kill;
9545
9546 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9547 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9548 else
9549 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9550
9551 ret = hclge_cmd_send(&hdev->hw, desc, 2);
9552 if (ret) {
9553 dev_err(&hdev->pdev->dev,
9554 "Send vf vlan command fail, ret =%d.\n",
9555 ret);
9556 return ret;
9557 }
9558
9559 return 0;
9560 }
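/* Worked example of the bitmap math above (illustrative vfid): for
 * vfid = 10, vf_byte_off = 10 / 8 = 1 and vf_byte_val = 1 << (10 % 8)
 * = 0x04, i.e. bit 2 of byte 1 in the first descriptor's vf_bitmap.
 * Function ids beyond HCLGE_MAX_VF_BYTES * 8 spill into the second,
 * chained descriptor, which is why both descriptors are always sent.
 */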
9561
9562 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9563 bool is_kill, struct hclge_desc *desc)
9564 {
9565 struct hclge_vlan_filter_vf_cfg_cmd *req;
9566
9567 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9568
9569 if (!is_kill) {
9570 #define HCLGE_VF_VLAN_NO_ENTRY 2
9571 if (!req->resp_code || req->resp_code == 1)
9572 return 0;
9573
9574 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9575 set_bit(vfid, hdev->vf_vlan_full);
9576 dev_warn(&hdev->pdev->dev,
9577 "vf vlan table is full, vf vlan filter is disabled\n");
9578 return 0;
9579 }
9580
9581 dev_err(&hdev->pdev->dev,
9582 "Add vf vlan filter fail, ret =%u.\n",
9583 req->resp_code);
9584 } else {
9585 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
9586 if (!req->resp_code)
9587 return 0;
9588
9589 /* vf vlan filter is disabled when the vf vlan table is full,
9590 * so a new vlan id will not be added into the vf vlan table.
9591 * Just return 0 without warning, to avoid massive verbose
9592 * log output at unload time.
9593 */
9594 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9595 return 0;
9596
9597 dev_err(&hdev->pdev->dev,
9598 "Kill vf vlan filter fail, ret =%u.\n",
9599 req->resp_code);
9600 }
9601
9602 return -EIO;
9603 }
9604
9605 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9606 bool is_kill, u16 vlan)
9607 {
9608 struct hclge_vport *vport = &hdev->vport[vfid];
9609 struct hclge_desc desc[2];
9610 int ret;
9611
9612 /* if the vf vlan table is full, firmware closes the vf vlan filter;
9613 * it is then impossible and unnecessary to add a new vlan id to it.
9614 * If spoof check is enabled and the vf vlan table is full, no new
9615 * vlan should be added, because tx packets with that vlan id will be dropped.
9616 */
9617 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9618 if (vport->vf_info.spoofchk && vlan) {
9619 dev_err(&hdev->pdev->dev,
9620 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9621 return -EPERM;
9622 }
9623 return 0;
9624 }
9625
9626 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9627 if (ret)
9628 return ret;
9629
9630 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9631 }
9632
9633 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9634 u16 vlan_id, bool is_kill)
9635 {
9636 struct hclge_vlan_filter_pf_cfg_cmd *req;
9637 struct hclge_desc desc;
9638 u8 vlan_offset_byte_val;
9639 u8 vlan_offset_byte;
9640 u8 vlan_offset_160;
9641 int ret;
9642
9643 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9644
9645 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9646 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9647 HCLGE_VLAN_BYTE_SIZE;
9648 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9649
9650 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9651 req->vlan_offset = vlan_offset_160;
9652 req->vlan_cfg = is_kill;
9653 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9654
9655 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9656 if (ret)
9657 dev_err(&hdev->pdev->dev,
9658 "port vlan command, send fail, ret =%d.\n", ret);
9659 return ret;
9660 }
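/* Worked example of the offset math above, assuming
 * HCLGE_VLAN_ID_OFFSET_STEP = 160 and HCLGE_VLAN_BYTE_SIZE = 8 (the
 * values in the matching header): for vlan_id = 1000,
 *
 *   vlan_offset_160      = 1000 / 160       = 6   (chunk of 160 ids)
 *   vlan_offset_byte     = (1000 % 160) / 8 = 5   (byte inside the chunk)
 *   vlan_offset_byte_val = 1 << (1000 % 8)  = 0x01
 *
 * so the command flips bit 0 of byte 5 in the chunk-6 bitmap.
 */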
9661
9662 static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
9663 u16 vlan_id, bool is_kill)
9664 {
9665 /* vlan 0 may be added twice when the 8021q module is enabled */
9666 if (!is_kill && !vlan_id &&
9667 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9668 return false;
9669
9670 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9671 dev_warn(&hdev->pdev->dev,
9672 "Add port vlan failed, vport %u is already in vlan %u\n",
9673 vport_id, vlan_id);
9674 return false;
9675 }
9676
9677 if (is_kill &&
9678 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9679 dev_warn(&hdev->pdev->dev,
9680 "Delete port vlan failed, vport %u is not in vlan %u\n",
9681 vport_id, vlan_id);
9682 return false;
9683 }
9684
9685 return true;
9686 }
9687
9688 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9689 u16 vport_id, u16 vlan_id,
9690 bool is_kill)
9691 {
9692 u16 vport_idx, vport_num = 0;
9693 int ret;
9694
9695 if (is_kill && !vlan_id)
9696 return 0;
9697
9698 if (vlan_id >= VLAN_N_VID)
9699 return -EINVAL;
9700
9701 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9702 if (ret) {
9703 dev_err(&hdev->pdev->dev,
9704 "Set %u vport vlan filter config fail, ret =%d.\n",
9705 vport_id, ret);
9706 return ret;
9707 }
9708
9709 if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
9710 return 0;
9711
9712 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9713 vport_num++;
9714
9715 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9716 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9717 is_kill);
9718
9719 return ret;
9720 }
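/* The per-vlan vport bitmap above acts as a reference count for the
 * shared port-level filter: hdev->vlan_table[vlan_id] tracks which
 * vports use the vlan, and the port entry is only written when the
 * first user appears (vport_num == 1 after the set) or torn down when
 * the last one leaves (vport_num == 0 after the clear); all other
 * transitions touch only the per-function VF table.
 */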
9721
9722 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9723 {
9724 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9725 struct hclge_vport_vtag_tx_cfg_cmd *req;
9726 struct hclge_dev *hdev = vport->back;
9727 struct hclge_desc desc;
9728 u16 bmap_index;
9729 int status;
9730
9731 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9732
9733 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9734 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9735 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9736 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9737 vcfg->accept_tag1 ? 1 : 0);
9738 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9739 vcfg->accept_untag1 ? 1 : 0);
9740 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9741 vcfg->accept_tag2 ? 1 : 0);
9742 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9743 vcfg->accept_untag2 ? 1 : 0);
9744 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9745 vcfg->insert_tag1_en ? 1 : 0);
9746 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9747 vcfg->insert_tag2_en ? 1 : 0);
9748 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9749 vcfg->tag_shift_mode_en ? 1 : 0);
9750 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9751
9752 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9753 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9754 HCLGE_VF_NUM_PER_BYTE;
9755 req->vf_bitmap[bmap_index] =
9756 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9757
9758 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9759 if (status)
9760 dev_err(&hdev->pdev->dev,
9761 "Send port txvlan cfg command fail, ret =%d\n",
9762 status);
9763
9764 return status;
9765 }
9766
9767 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9768 {
9769 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9770 struct hclge_vport_vtag_rx_cfg_cmd *req;
9771 struct hclge_dev *hdev = vport->back;
9772 struct hclge_desc desc;
9773 u16 bmap_index;
9774 int status;
9775
9776 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9777
9778 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9779 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9780 vcfg->strip_tag1_en ? 1 : 0);
9781 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9782 vcfg->strip_tag2_en ? 1 : 0);
9783 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9784 vcfg->vlan1_vlan_prionly ? 1 : 0);
9785 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9786 vcfg->vlan2_vlan_prionly ? 1 : 0);
9787 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9788 vcfg->strip_tag1_discard_en ? 1 : 0);
9789 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9790 vcfg->strip_tag2_discard_en ? 1 : 0);
9791
9792 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9793 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9794 HCLGE_VF_NUM_PER_BYTE;
9795 req->vf_bitmap[bmap_index] =
9796 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9797
9798 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9799 if (status)
9800 dev_err(&hdev->pdev->dev,
9801 "Send port rxvlan cfg command fail, ret =%d\n",
9802 status);
9803
9804 return status;
9805 }
9806
9807 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9808 u16 port_base_vlan_state,
9809 u16 vlan_tag, u8 qos)
9810 {
9811 int ret;
9812
9813 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9814 vport->txvlan_cfg.accept_tag1 = true;
9815 vport->txvlan_cfg.insert_tag1_en = false;
9816 vport->txvlan_cfg.default_tag1 = 0;
9817 } else {
9818 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9819
9820 vport->txvlan_cfg.accept_tag1 =
9821 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9822 vport->txvlan_cfg.insert_tag1_en = true;
9823 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9824 vlan_tag;
9825 }
9826
9827 vport->txvlan_cfg.accept_untag1 = true;
9828
9829 /* accept_tag2 and accept_untag2 are not supported on
9830 * pdev revision(0x20); newer revisions support them, but
9831 * these two fields cannot be configured by the user.
9832 */
9833 vport->txvlan_cfg.accept_tag2 = true;
9834 vport->txvlan_cfg.accept_untag2 = true;
9835 vport->txvlan_cfg.insert_tag2_en = false;
9836 vport->txvlan_cfg.default_tag2 = 0;
9837 vport->txvlan_cfg.tag_shift_mode_en = true;
9838
9839 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9840 vport->rxvlan_cfg.strip_tag1_en = false;
9841 vport->rxvlan_cfg.strip_tag2_en =
9842 vport->rxvlan_cfg.rx_vlan_offload_en;
9843 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9844 } else {
9845 vport->rxvlan_cfg.strip_tag1_en =
9846 vport->rxvlan_cfg.rx_vlan_offload_en;
9847 vport->rxvlan_cfg.strip_tag2_en = true;
9848 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9849 }
9850
9851 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9852 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9853 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9854
9855 ret = hclge_set_vlan_tx_offload_cfg(vport);
9856 if (ret)
9857 return ret;
9858
9859 return hclge_set_vlan_rx_offload_cfg(vport);
9860 }
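/* Worked example of the port-based-VLAN tx tag above (hypothetical
 * values): for vlan_tag = 100 and qos = 3,
 *
 *   default_tag1 = (3 << VLAN_PRIO_SHIFT) | 100
 *                = (3 << 13) | 0x64 = 0x6064 (24676)
 *
 * which the hardware inserts as tag1 on egress, while on ingress
 * strip_tag1_en removes it again, so the function never sees the
 * port vlan in its received packets.
 */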
9861
9862 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9863 {
9864 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9865 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9866 struct hclge_desc desc;
9867 int status;
9868
9869 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9870 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9871 rx_req->ot_fst_vlan_type =
9872 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9873 rx_req->ot_sec_vlan_type =
9874 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9875 rx_req->in_fst_vlan_type =
9876 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9877 rx_req->in_sec_vlan_type =
9878 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9879
9880 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9881 if (status) {
9882 dev_err(&hdev->pdev->dev,
9883 "Send rxvlan protocol type command fail, ret =%d\n",
9884 status);
9885 return status;
9886 }
9887
9888 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9889
9890 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9891 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9892 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9893
9894 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9895 if (status)
9896 dev_err(&hdev->pdev->dev,
9897 "Send txvlan protocol type command fail, ret =%d\n",
9898 status);
9899
9900 return status;
9901 }
9902
9903 static int hclge_init_vlan_filter(struct hclge_dev *hdev)
9904 {
9905 struct hclge_vport *vport;
9906 bool enable = true;
9907 int ret;
9908 int i;
9909
9910 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9911 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9912 HCLGE_FILTER_FE_EGRESS_V1_B,
9913 true, 0);
9914
9915 /* for revision 0x21, vf vlan filter is per function */
9916 for (i = 0; i < hdev->num_alloc_vport; i++) {
9917 vport = &hdev->vport[i];
9918 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9919 HCLGE_FILTER_FE_EGRESS, true,
9920 vport->vport_id);
9921 if (ret)
9922 return ret;
9923 vport->cur_vlan_fltr_en = true;
9924 }
9925
9926 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) &&
9927 !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
9928 enable = false;
9929
9930 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9931 HCLGE_FILTER_FE_INGRESS, enable, 0);
9932 }
9933
9934 static int hclge_init_vlan_type(struct hclge_dev *hdev)
9935 {
9936 hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
9937 hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
9938 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
9939 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
9940 hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
9941 hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;
9942
9943 return hclge_set_vlan_protocol_type(hdev);
9944 }
9945
9946 static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
9947 {
9948 struct hclge_port_base_vlan_config *cfg;
9949 struct hclge_vport *vport;
9950 int ret;
9951 int i;
9952
9953 for (i = 0; i < hdev->num_alloc_vport; i++) {
9954 vport = &hdev->vport[i];
9955 cfg = &vport->port_base_vlan_cfg;
9956
9957 ret = hclge_vlan_offload_cfg(vport, cfg->state,
9958 cfg->vlan_info.vlan_tag,
9959 cfg->vlan_info.qos);
9960 if (ret)
9961 return ret;
9962 }
9963 return 0;
9964 }
9965
9966 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9967 {
9968 struct hnae3_handle *handle = &hdev->vport[0].nic;
9969 int ret;
9970
9971 ret = hclge_init_vlan_filter(hdev);
9972 if (ret)
9973 return ret;
9974
9975 ret = hclge_init_vlan_type(hdev);
9976 if (ret)
9977 return ret;
9978
9979 ret = hclge_init_vport_vlan_offload(hdev);
9980 if (ret)
9981 return ret;
9982
9983 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9984 }
9985
9986 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9987 bool writen_to_tbl)
9988 {
9989 struct hclge_vport_vlan_cfg *vlan, *tmp;
9990 struct hclge_dev *hdev = vport->back;
9991
9992 mutex_lock(&hdev->vport_lock);
9993
9994 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9995 if (vlan->vlan_id == vlan_id) {
9996 mutex_unlock(&hdev->vport_lock);
9997 return;
9998 }
9999 }
10000
10001 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10002 if (!vlan) {
10003 mutex_unlock(&hdev->vport_lock);
10004 return;
10005 }
10006
10007 vlan->hd_tbl_status = writen_to_tbl;
10008 vlan->vlan_id = vlan_id;
10009
10010 list_add_tail(&vlan->node, &vport->vlan_list);
10011 mutex_unlock(&hdev->vport_lock);
10012 }
10013
10014 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10015 {
10016 struct hclge_vport_vlan_cfg *vlan, *tmp;
10017 struct hclge_dev *hdev = vport->back;
10018 int ret;
10019
10020 mutex_lock(&hdev->vport_lock);
10021
10022 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10023 if (!vlan->hd_tbl_status) {
10024 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10025 vport->vport_id,
10026 vlan->vlan_id, false);
10027 if (ret) {
10028 dev_err(&hdev->pdev->dev,
10029 "restore vport vlan list failed, ret=%d\n",
10030 ret);
10031
10032 mutex_unlock(&hdev->vport_lock);
10033 return ret;
10034 }
10035 }
10036 vlan->hd_tbl_status = true;
10037 }
10038
10039 mutex_unlock(&hdev->vport_lock);
10040
10041 return 0;
10042 }
10043
10044 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10045 bool is_write_tbl)
10046 {
10047 struct hclge_vport_vlan_cfg *vlan, *tmp;
10048 struct hclge_dev *hdev = vport->back;
10049
10050 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10051 if (vlan->vlan_id == vlan_id) {
10052 if (is_write_tbl && vlan->hd_tbl_status)
10053 hclge_set_vlan_filter_hw(hdev,
10054 htons(ETH_P_8021Q),
10055 vport->vport_id,
10056 vlan_id,
10057 true);
10058
10059 list_del(&vlan->node);
10060 kfree(vlan);
10061 break;
10062 }
10063 }
10064 }
10065
10066 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10067 {
10068 struct hclge_vport_vlan_cfg *vlan, *tmp;
10069 struct hclge_dev *hdev = vport->back;
10070
10071 mutex_lock(&hdev->vport_lock);
10072
10073 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10074 if (vlan->hd_tbl_status)
10075 hclge_set_vlan_filter_hw(hdev,
10076 htons(ETH_P_8021Q),
10077 vport->vport_id,
10078 vlan->vlan_id,
10079 true);
10080
10081 vlan->hd_tbl_status = false;
10082 if (is_del_list) {
10083 list_del(&vlan->node);
10084 kfree(vlan);
10085 }
10086 }
10087 clear_bit(vport->vport_id, hdev->vf_vlan_full);
10088 mutex_unlock(&hdev->vport_lock);
10089 }
10090
10091 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10092 {
10093 struct hclge_vport_vlan_cfg *vlan, *tmp;
10094 struct hclge_vport *vport;
10095 int i;
10096
10097 mutex_lock(&hdev->vport_lock);
10098
10099 for (i = 0; i < hdev->num_alloc_vport; i++) {
10100 vport = &hdev->vport[i];
10101 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10102 list_del(&vlan->node);
10103 kfree(vlan);
10104 }
10105 }
10106
10107 mutex_unlock(&hdev->vport_lock);
10108 }
10109
10110 void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
10111 {
10112 struct hclge_vlan_info *vlan_info;
10113 struct hclge_vport *vport;
10114 u16 vlan_proto;
10115 u16 vlan_id;
10116 u16 state;
10117 int vf_id;
10118 int ret;
10119
10120 /* PF should restore all VFs' port base vlan */
10121 for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
10122 vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
10123 vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
10124 &vport->port_base_vlan_cfg.vlan_info :
10125 &vport->port_base_vlan_cfg.old_vlan_info;
10126
10127 vlan_id = vlan_info->vlan_tag;
10128 vlan_proto = vlan_info->vlan_proto;
10129 state = vport->port_base_vlan_cfg.state;
10130
10131 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10132 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10133 ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10134 vport->vport_id,
10135 vlan_id, false);
10136 vport->port_base_vlan_cfg.tbl_sta = ret == 0;
10137 }
10138 }
10139 }
10140
10141 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10142 {
10143 struct hclge_vport_vlan_cfg *vlan, *tmp;
10144 struct hclge_dev *hdev = vport->back;
10145 int ret;
10146
10147 mutex_lock(&hdev->vport_lock);
10148
10149 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10150 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10151 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10152 vport->vport_id,
10153 vlan->vlan_id, false);
10154 if (ret)
10155 break;
10156 vlan->hd_tbl_status = true;
10157 }
10158 }
10159
10160 mutex_unlock(&hdev->vport_lock);
10161 }
10162
10163 /* For global reset and imp reset, hardware will clear the mac table,
10164  * so we change the mac address state from ACTIVE to TO_ADD, then they
10165  * can be restored in the service task after the reset completes.
10166  * Furthermore, the mac addresses with state TO_DEL or DEL_FAIL do not
10167  * need to be restored after reset, so just remove these mac nodes from
10168  * mac_list.
10169  */
10169 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10170 {
10171 struct hclge_mac_node *mac_node, *tmp;
10172
10173 list_for_each_entry_safe(mac_node, tmp, list, node) {
10174 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10175 mac_node->state = HCLGE_MAC_TO_ADD;
10176 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10177 list_del(&mac_node->node);
10178 kfree(mac_node);
10179 }
10180 }
10181 }
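/* Editorial sketch (not part of the original source): the conversion above
 * implements a small state machine over the mac node list, derived directly
 * from the code:
 *
 *   ACTIVE -> TO_ADD    (re-programmed into hw by the service task)
 *   TO_DEL -> freed     (the reset cleared the hw table anyway)
 *   other states        (left unchanged by this helper)
 */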
10182
10183 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10184 {
10185 spin_lock_bh(&vport->mac_list_lock);
10186
10187 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10188 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10189 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10190
10191 spin_unlock_bh(&vport->mac_list_lock);
10192 }
10193
10194 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10195 {
10196 struct hclge_vport *vport = &hdev->vport[0];
10197 struct hnae3_handle *handle = &vport->nic;
10198
10199 hclge_restore_mac_table_common(vport);
10200 hclge_restore_vport_port_base_vlan_config(hdev);
10201 hclge_restore_vport_vlan_table(vport);
10202 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10203 hclge_restore_fd_entries(handle);
10204 }
10205
10206 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10207 {
10208 struct hclge_vport *vport = hclge_get_vport(handle);
10209
10210 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10211 vport->rxvlan_cfg.strip_tag1_en = false;
10212 vport->rxvlan_cfg.strip_tag2_en = enable;
10213 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10214 } else {
10215 vport->rxvlan_cfg.strip_tag1_en = enable;
10216 vport->rxvlan_cfg.strip_tag2_en = true;
10217 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10218 }
10219
10220 vport->rxvlan_cfg.strip_tag1_discard_en = false;
10221 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10222 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10223 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10224
10225 return hclge_set_vlan_rx_offload_cfg(vport);
10226 }
10227
10228 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10229 {
10230 struct hclge_dev *hdev = vport->back;
10231
10232 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10233 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10234 }
10235
10236 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10237 u16 port_base_vlan_state,
10238 struct hclge_vlan_info *new_info,
10239 struct hclge_vlan_info *old_info)
10240 {
10241 struct hclge_dev *hdev = vport->back;
10242 int ret;
10243
10244 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10245 hclge_rm_vport_all_vlan_table(vport, false);
10246 /* force clear VLAN 0 */
10247 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10248 if (ret)
10249 return ret;
10250 return hclge_set_vlan_filter_hw(hdev,
10251 htons(new_info->vlan_proto),
10252 vport->vport_id,
10253 new_info->vlan_tag,
10254 false);
10255 }
10256
10257 vport->port_base_vlan_cfg.tbl_sta = false;
10258
10259 /* force add VLAN 0 */
10260 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10261 if (ret)
10262 return ret;
10263
10264 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10265 vport->vport_id, old_info->vlan_tag,
10266 true);
10267 if (ret)
10268 return ret;
10269
10270 return hclge_add_vport_all_vlan_table(vport);
10271 }
10272
10273 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10274 const struct hclge_vlan_info *old_cfg)
10275 {
10276 if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10277 return true;
10278
10279 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10280 return true;
10281
10282 return false;
10283 }
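/* Editorial worked example (not part of the original source): with
 * new_cfg = {vlan_tag = 0, qos = 0} and old_cfg = {vlan_tag = 0, qos = 5},
 * the first test fails (tags match) but the second returns true, since a
 * qos-only setting on vlan 0 is being added or removed and the filter
 * entry must be refreshed. If both tag and qos are unchanged, the function
 * returns false and the caller skips the hardware update.
 */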
10284
10285 static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
10286 struct hclge_vlan_info *new_info,
10287 struct hclge_vlan_info *old_info)
10288 {
10289 struct hclge_dev *hdev = vport->back;
10290 int ret;
10291
10292 /* add new VLAN tag */
10293 ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
10294 vport->vport_id, new_info->vlan_tag,
10295 false);
10296 if (ret)
10297 return ret;
10298
10299 vport->port_base_vlan_cfg.tbl_sta = false;
10300 /* remove old VLAN tag */
10301 if (old_info->vlan_tag == 0)
10302 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10303 true, 0);
10304 else
10305 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10306 vport->vport_id,
10307 old_info->vlan_tag, true);
10308 if (ret)
10309 dev_err(&hdev->pdev->dev,
10310 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10311 vport->vport_id, old_info->vlan_tag, ret);
10312
10313 return ret;
10314 }
10315
10316 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10317 struct hclge_vlan_info *vlan_info)
10318 {
10319 struct hnae3_handle *nic = &vport->nic;
10320 struct hclge_vlan_info *old_vlan_info;
10321 int ret;
10322
10323 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10324
10325 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10326 vlan_info->qos);
10327 if (ret)
10328 return ret;
10329
10330 if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10331 goto out;
10332
10333 if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
10334 ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
10335 old_vlan_info);
10336 else
10337 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10338 old_vlan_info);
10339 if (ret)
10340 return ret;
10341
10342 out:
10343 vport->port_base_vlan_cfg.state = state;
10344 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10345 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10346 else
10347 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10348
10349 vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info;
10350 vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10351 vport->port_base_vlan_cfg.tbl_sta = true;
10352 hclge_set_vport_vlan_fltr_change(vport);
10353
10354 return 0;
10355 }
10356
10357 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10358 enum hnae3_port_base_vlan_state state,
10359 u16 vlan, u8 qos)
10360 {
10361 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10362 if (!vlan && !qos)
10363 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10364
10365 return HNAE3_PORT_BASE_VLAN_ENABLE;
10366 }
10367
10368 if (!vlan && !qos)
10369 return HNAE3_PORT_BASE_VLAN_DISABLE;
10370
10371 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10372 vport->port_base_vlan_cfg.vlan_info.qos == qos)
10373 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10374
10375 return HNAE3_PORT_BASE_VLAN_MODIFY;
10376 }
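/* Editorial summary (derived directly from the code above): the returned
 * state reduces to the following decision table:
 *
 *   current state    requested (vlan, qos)         result
 *   -------------    ---------------------         ------
 *   DISABLE          (0, 0)                        NOCHANGE
 *   DISABLE          anything else                 ENABLE
 *   ENABLE/MODIFY    (0, 0)                        DISABLE
 *   ENABLE/MODIFY    same tag and qos as before    NOCHANGE
 *   ENABLE/MODIFY    different tag or qos          MODIFY
 */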
10377
10378 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10379 u16 vlan, u8 qos, __be16 proto)
10380 {
10381 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10382 struct hclge_vport *vport = hclge_get_vport(handle);
10383 struct hclge_dev *hdev = vport->back;
10384 struct hclge_vlan_info vlan_info;
10385 u16 state;
10386 int ret;
10387
10388 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10389 return -EOPNOTSUPP;
10390
10391 vport = hclge_get_vf_vport(hdev, vfid);
10392 if (!vport)
10393 return -EINVAL;
10394
10395 /* qos is a 3-bit value, so it cannot be bigger than 7 */
10396 if (vlan > VLAN_N_VID - 1 || qos > 7)
10397 return -EINVAL;
10398 if (proto != htons(ETH_P_8021Q))
10399 return -EPROTONOSUPPORT;
10400
10401 state = hclge_get_port_base_vlan_state(vport,
10402 vport->port_base_vlan_cfg.state,
10403 vlan, qos);
10404 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10405 return 0;
10406
10407 vlan_info.vlan_tag = vlan;
10408 vlan_info.qos = qos;
10409 vlan_info.vlan_proto = ntohs(proto);
10410
10411 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10412 if (ret) {
10413 dev_err(&hdev->pdev->dev,
10414 "failed to update port base vlan for vf %d, ret = %d\n",
10415 vfid, ret);
10416 return ret;
10417 }
10418
10419 /* There is a time window before the PF knows the VF is not alive,
10420  * which may cause the mailbox send to fail. This doesn't matter:
10421  * the VF will query the state when it reinitializes.
10422  * For DEVICE_VERSION_V3, the VF doesn't need to know about the port
10423  * based VLAN state.
10424  */
10425 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
10426 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10427 (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10428 vport->vport_id,
10429 state,
10430 &vlan_info);
10431 else
10432 set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
10433 &vport->need_notify);
10434 }
10435 return 0;
10436 }
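/* Editorial usage note (an assumption, not from the original source): this
 * handler typically backs the ndo_set_vf_vlan path, i.e. it is reached from
 * userspace via something like:
 *
 *   ip link set <pf-netdev> vf 0 vlan 100 qos 3
 *
 * which requests a port based vlan of 100 with priority 3 for VF 0, and
 *
 *   ip link set <pf-netdev> vf 0 vlan 0
 *
 * which disables it again.
 */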
10437
10438 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10439 {
10440 struct hclge_vlan_info *vlan_info;
10441 struct hclge_vport *vport;
10442 int ret;
10443 int vf;
10444
10445 /* clear port base vlan for all VFs */
10446 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10447 vport = &hdev->vport[vf];
10448 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10449
10450 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10451 vport->vport_id,
10452 vlan_info->vlan_tag, true);
10453 if (ret)
10454 dev_err(&hdev->pdev->dev,
10455 "failed to clear vf vlan for vf%d, ret = %d\n",
10456 vf - HCLGE_VF_VPORT_START_NUM, ret);
10457 }
10458 }
10459
10460 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10461 u16 vlan_id, bool is_kill)
10462 {
10463 struct hclge_vport *vport = hclge_get_vport(handle);
10464 struct hclge_dev *hdev = vport->back;
10465 bool writen_to_tbl = false;
10466 int ret = 0;
10467
10468 /* When the device is resetting or reset failed, firmware is unable to
10469  * handle the mailbox. Just record the vlan id, and remove it after
10470  * the reset finishes.
10471  */
10472 mutex_lock(&hdev->vport_lock);
10473 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10474 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10475 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10476 mutex_unlock(&hdev->vport_lock);
10477 return -EBUSY;
10478 } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) {
10479 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10480 }
10481 mutex_unlock(&hdev->vport_lock);
10482
10483 /* When port based vlan is enabled, we use the port based vlan as the
10484  * vlan filter entry. In this case, we don't update the vlan filter
10485  * table when the user adds a new vlan or removes an existing one; we
10486  * just update the vport vlan list. The vlan ids in the vlan list are
10487  * written to the vlan filter table once port based vlan is disabled.
10488  */
10489 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10490 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10491 vlan_id, is_kill);
10492 writen_to_tbl = true;
10493 }
10494
10495 if (!ret) {
10496 if (!is_kill) {
10497 hclge_add_vport_vlan_table(vport, vlan_id,
10498 writen_to_tbl);
10499 } else if (is_kill && vlan_id != 0) {
10500 mutex_lock(&hdev->vport_lock);
10501 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10502 mutex_unlock(&hdev->vport_lock);
10503 }
10504 } else if (is_kill) {
10505 /* When removing the hw vlan filter failed, record the vlan id,
10506  * and try to remove it from hw later, to stay consistent
10507  * with the stack.
10508  */
10509 mutex_lock(&hdev->vport_lock);
10510 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10511 mutex_unlock(&hdev->vport_lock);
10512 }
10513
10514 hclge_set_vport_vlan_fltr_change(vport);
10515
10516 return ret;
10517 }
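/* Editorial summary (not part of the original source): the function above
 * has four outcomes. During reset, a kill request is only recorded in
 * vlan_del_fail_bmap and -EBUSY is returned. Otherwise the hardware table
 * is touched only when port based vlan is disabled; on success the vport
 * vlan list is updated, and on a failed kill the vlan id is recorded so
 * hclge_sync_vlan_filter() can retry it later.
 */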
10518
10519 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10520 {
10521 struct hclge_vport *vport;
10522 int ret;
10523 u16 i;
10524
10525 for (i = 0; i < hdev->num_alloc_vport; i++) {
10526 vport = &hdev->vport[i];
10527 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10528 &vport->state))
10529 continue;
10530
10531 ret = hclge_enable_vport_vlan_filter(vport,
10532 vport->req_vlan_fltr_en);
10533 if (ret) {
10534 dev_err(&hdev->pdev->dev,
10535 "failed to sync vlan filter state for vport%u, ret = %d\n",
10536 vport->vport_id, ret);
10537 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10538 &vport->state);
10539 return;
10540 }
10541 }
10542 }
10543
10544 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10545 {
10546 #define HCLGE_MAX_SYNC_COUNT 60
10547
10548 int i, ret, sync_cnt = 0;
10549 u16 vlan_id;
10550
10551 mutex_lock(&hdev->vport_lock);
10552 /* walk all allocated vports, starting from vport 0 (the PF) */
10553 for (i = 0; i < hdev->num_alloc_vport; i++) {
10554 struct hclge_vport *vport = &hdev->vport[i];
10555
10556 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10557 VLAN_N_VID);
10558 while (vlan_id != VLAN_N_VID) {
10559 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10560 vport->vport_id, vlan_id,
10561 true);
10562 if (ret && ret != -EINVAL) {
10563 mutex_unlock(&hdev->vport_lock);
10564 return;
10565 }
10566
10567 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10568 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10569 hclge_set_vport_vlan_fltr_change(vport);
10570
10571 sync_cnt++;
10572 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) {
10573 mutex_unlock(&hdev->vport_lock);
10574 return;
10575 }
10576
10577 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10578 VLAN_N_VID);
10579 }
10580 }
10581 mutex_unlock(&hdev->vport_lock);
10582
10583 hclge_sync_vlan_fltr_state(hdev);
10584 }
10585
10586 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10587 {
10588 struct hclge_config_max_frm_size_cmd *req;
10589 struct hclge_desc desc;
10590
10591 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10592
10593 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10594 req->max_frm_size = cpu_to_le16(new_mps);
10595 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10596
10597 return hclge_cmd_send(&hdev->hw, &desc, 1);
10598 }
10599
10600 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10601 {
10602 struct hclge_vport *vport = hclge_get_vport(handle);
10603
10604 return hclge_set_vport_mtu(vport, new_mtu);
10605 }
10606
10607 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10608 {
10609 struct hclge_dev *hdev = vport->back;
10610 int i, max_frm_size, ret;
10611
10612 /* HW supports 2 layers of vlan tags */
10613 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10614 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10615 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10616 return -EINVAL;
10617
10618 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10619 mutex_lock(&hdev->vport_lock);
10620 /* VF's mps must fit within hdev->mps */
10621 if (vport->vport_id && max_frm_size > hdev->mps) {
10622 mutex_unlock(&hdev->vport_lock);
10623 return -EINVAL;
10624 } else if (vport->vport_id) {
10625 vport->mps = max_frm_size;
10626 mutex_unlock(&hdev->vport_lock);
10627 return 0;
10628 }
10629
10630 /* PF's mps must be greater than the VFs' mps */
10631 for (i = 1; i < hdev->num_alloc_vport; i++)
10632 if (max_frm_size < hdev->vport[i].mps) {
10633 dev_err(&hdev->pdev->dev,
10634 "failed to set pf mtu for less than vport %d, mps = %u.\n",
10635 i, hdev->vport[i].mps);
10636 mutex_unlock(&hdev->vport_lock);
10637 return -EINVAL;
10638 }
10639
10640 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10641
10642 ret = hclge_set_mac_mtu(hdev, max_frm_size);
10643 if (ret) {
10644 dev_err(&hdev->pdev->dev,
10645 "Change mtu fail, ret =%d\n", ret);
10646 goto out;
10647 }
10648
10649 hdev->mps = max_frm_size;
10650 vport->mps = max_frm_size;
10651
10652 ret = hclge_buffer_alloc(hdev);
10653 if (ret)
10654 dev_err(&hdev->pdev->dev,
10655 "Allocate buffer fail, ret =%d\n", ret);
10656
10657 out:
10658 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10659 mutex_unlock(&hdev->vport_lock);
10660 return ret;
10661 }
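/* Editorial worked example (not part of the original source): with the
 * usual header sizes ETH_HLEN = 14, ETH_FCS_LEN = 4 and VLAN_HLEN = 4,
 * a requested MTU of 1500 gives
 *
 *   max_frm_size = 1500 + 14 + 4 + 2 * 4 = 1526
 *
 * which is then raised to HCLGE_MAC_DEFAULT_FRAME if that constant is
 * larger, so the frame size never drops below the driver's default.
 */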
10662
10663 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10664 bool enable)
10665 {
10666 struct hclge_reset_tqp_queue_cmd *req;
10667 struct hclge_desc desc;
10668 int ret;
10669
10670 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10671
10672 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10673 req->tqp_id = cpu_to_le16(queue_id);
10674 if (enable)
10675 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10676
10677 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10678 if (ret) {
10679 dev_err(&hdev->pdev->dev,
10680 "Send tqp reset cmd error, status =%d\n", ret);
10681 return ret;
10682 }
10683
10684 return 0;
10685 }
10686
10687 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
10688 u8 *reset_status)
10689 {
10690 struct hclge_reset_tqp_queue_cmd *req;
10691 struct hclge_desc desc;
10692 int ret;
10693
10694 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10695
10696 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10697 req->tqp_id = cpu_to_le16(queue_id);
10698
10699 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10700 if (ret) {
10701 dev_err(&hdev->pdev->dev,
10702 "Get reset status error, status =%d\n", ret);
10703 return ret;
10704 }
10705
10706 *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10707
10708 return 0;
10709 }
10710
10711 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10712 {
10713 struct hclge_comm_tqp *tqp;
10714 struct hnae3_queue *queue;
10715
10716 queue = handle->kinfo.tqp[queue_id];
10717 tqp = container_of(queue, struct hclge_comm_tqp, q);
10718
10719 return tqp->index;
10720 }
10721
10722 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10723 {
10724 struct hclge_vport *vport = hclge_get_vport(handle);
10725 struct hclge_dev *hdev = vport->back;
10726 u16 reset_try_times = 0;
10727 u8 reset_status;
10728 u16 queue_gid;
10729 int ret;
10730 u16 i;
10731
10732 for (i = 0; i < handle->kinfo.num_tqps; i++) {
10733 queue_gid = hclge_covert_handle_qid_global(handle, i);
10734 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10735 if (ret) {
10736 dev_err(&hdev->pdev->dev,
10737 "failed to send reset tqp cmd, ret = %d\n",
10738 ret);
10739 return ret;
10740 }
10741
10742 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10743 ret = hclge_get_reset_status(hdev, queue_gid,
10744 &reset_status);
10745 if (ret)
10746 return ret;
10747
10748 if (reset_status)
10749 break;
10750
10751 /* Wait for tqp hw reset */
10752 usleep_range(1000, 1200);
10753 }
10754
10755 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10756 dev_err(&hdev->pdev->dev,
10757 "wait for tqp hw reset timeout\n");
10758 return -ETIME;
10759 }
10760
10761 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10762 if (ret) {
10763 dev_err(&hdev->pdev->dev,
10764 "failed to deassert soft reset, ret = %d\n",
10765 ret);
10766 return ret;
10767 }
10768 reset_try_times = 0;
10769 }
10770 return 0;
10771 }
10772
10773 static int hclge_reset_rcb(struct hnae3_handle *handle)
10774 {
10775 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U
10776 #define HCLGE_RESET_RCB_SUCCESS 1U
10777
10778 struct hclge_vport *vport = hclge_get_vport(handle);
10779 struct hclge_dev *hdev = vport->back;
10780 struct hclge_reset_cmd *req;
10781 struct hclge_desc desc;
10782 u8 return_status;
10783 u16 queue_gid;
10784 int ret;
10785
10786 queue_gid = hclge_covert_handle_qid_global(handle, 0);
10787
10788 req = (struct hclge_reset_cmd *)desc.data;
10789 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10790 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10791 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10792 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10793
10794 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10795 if (ret) {
10796 dev_err(&hdev->pdev->dev,
10797 "failed to send rcb reset cmd, ret = %d\n", ret);
10798 return ret;
10799 }
10800
10801 return_status = req->fun_reset_rcb_return_status;
10802 if (return_status == HCLGE_RESET_RCB_SUCCESS)
10803 return 0;
10804
10805 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10806 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10807 return_status);
10808 return -EIO;
10809 }
10810
10811 /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10812 * again to reset all tqps
10813 */
10814 return hclge_reset_tqp_cmd(handle);
10815 }
10816
10817 int hclge_reset_tqp(struct hnae3_handle *handle)
10818 {
10819 struct hclge_vport *vport = hclge_get_vport(handle);
10820 struct hclge_dev *hdev = vport->back;
10821 int ret;
10822
10823 /* only the PF's tqps need to be disabled */
10824 if (!vport->vport_id) {
10825 ret = hclge_tqp_enable(handle, false);
10826 if (ret) {
10827 dev_err(&hdev->pdev->dev,
10828 "failed to disable tqp, ret = %d\n", ret);
10829 return ret;
10830 }
10831 }
10832
10833 return hclge_reset_rcb(handle);
10834 }
10835
10836 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10837 {
10838 struct hclge_vport *vport = hclge_get_vport(handle);
10839 struct hclge_dev *hdev = vport->back;
10840
10841 return hdev->fw_version;
10842 }
10843
10844 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10845 {
10846 struct phy_device *phydev = hdev->hw.mac.phydev;
10847
10848 if (!phydev)
10849 return;
10850
10851 phy_set_asym_pause(phydev, rx_en, tx_en);
10852 }
10853
10854 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10855 {
10856 int ret;
10857
10858 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10859 return 0;
10860
10861 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10862 if (ret)
10863 dev_err(&hdev->pdev->dev,
10864 "configure pauseparam error, ret = %d.\n", ret);
10865
10866 return ret;
10867 }
10868
10869 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10870 {
10871 struct phy_device *phydev = hdev->hw.mac.phydev;
10872 u16 remote_advertising = 0;
10873 u16 local_advertising;
10874 u32 rx_pause, tx_pause;
10875 u8 flowctl;
10876
10877 if (!phydev->link)
10878 return 0;
10879
10880 if (!phydev->autoneg)
10881 return hclge_mac_pause_setup_hw(hdev);
10882
10883 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10884
10885 if (phydev->pause)
10886 remote_advertising = LPA_PAUSE_CAP;
10887
10888 if (phydev->asym_pause)
10889 remote_advertising |= LPA_PAUSE_ASYM;
10890
10891 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10892 remote_advertising);
10893 tx_pause = flowctl & FLOW_CTRL_TX;
10894 rx_pause = flowctl & FLOW_CTRL_RX;
10895
10896 if (phydev->duplex == HCLGE_MAC_HALF) {
10897 tx_pause = 0;
10898 rx_pause = 0;
10899 }
10900
10901 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10902 }
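/* Editorial worked example (not part of the original source): assuming the
 * standard IEEE 802.3 pause resolution done by mii_resolve_flowctrl_fdx(),
 * if both sides advertise symmetric pause (local ADVERTISE_PAUSE_CAP,
 * remote LPA_PAUSE_CAP), the result is FLOW_CTRL_TX | FLOW_CTRL_RX. If the
 * local side advertises only asymmetric pause while the remote advertises
 * both capability bits, the resolution yields FLOW_CTRL_TX only. Half
 * duplex links get pause disabled entirely, as the code above enforces.
 */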
10903
10904 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10905 u32 *rx_en, u32 *tx_en)
10906 {
10907 struct hclge_vport *vport = hclge_get_vport(handle);
10908 struct hclge_dev *hdev = vport->back;
10909 u8 media_type = hdev->hw.mac.media_type;
10910
10911 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10912 hclge_get_autoneg(handle) : 0;
10913
10914 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10915 *rx_en = 0;
10916 *tx_en = 0;
10917 return;
10918 }
10919
10920 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10921 *rx_en = 1;
10922 *tx_en = 0;
10923 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10924 *tx_en = 1;
10925 *rx_en = 0;
10926 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10927 *rx_en = 1;
10928 *tx_en = 1;
10929 } else {
10930 *rx_en = 0;
10931 *tx_en = 0;
10932 }
10933 }
10934
10935 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10936 u32 rx_en, u32 tx_en)
10937 {
10938 if (rx_en && tx_en)
10939 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10940 else if (rx_en && !tx_en)
10941 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10942 else if (!rx_en && tx_en)
10943 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10944 else
10945 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10946
10947 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10948 }
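/* Editorial summary (derived directly from the code above):
 *
 *   rx_en  tx_en   fc_mode recorded
 *   -----  -----   ----------------
 *     1      1     HCLGE_FC_FULL
 *     1      0     HCLGE_FC_RX_PAUSE
 *     0      1     HCLGE_FC_TX_PAUSE
 *     0      0     HCLGE_FC_NONE
 */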
10949
10950 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10951 u32 rx_en, u32 tx_en)
10952 {
10953 struct hclge_vport *vport = hclge_get_vport(handle);
10954 struct hclge_dev *hdev = vport->back;
10955 struct phy_device *phydev = hdev->hw.mac.phydev;
10956 u32 fc_autoneg;
10957
10958 if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10959 fc_autoneg = hclge_get_autoneg(handle);
10960 if (auto_neg != fc_autoneg) {
10961 dev_info(&hdev->pdev->dev,
10962 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10963 return -EOPNOTSUPP;
10964 }
10965 }
10966
10967 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10968 dev_info(&hdev->pdev->dev,
10969 "Priority flow control enabled. Cannot set link flow control.\n");
10970 return -EOPNOTSUPP;
10971 }
10972
10973 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10974
10975 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10976
10977 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10978 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10979
10980 if (phydev)
10981 return phy_start_aneg(phydev);
10982
10983 return -EOPNOTSUPP;
10984 }
10985
10986 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10987 u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num)
10988 {
10989 struct hclge_vport *vport = hclge_get_vport(handle);
10990 struct hclge_dev *hdev = vport->back;
10991
10992 if (speed)
10993 *speed = hdev->hw.mac.speed;
10994 if (duplex)
10995 *duplex = hdev->hw.mac.duplex;
10996 if (auto_neg)
10997 *auto_neg = hdev->hw.mac.autoneg;
10998 if (lane_num)
10999 *lane_num = hdev->hw.mac.lane_num;
11000 }
11001
11002 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11003 u8 *module_type)
11004 {
11005 struct hclge_vport *vport = hclge_get_vport(handle);
11006 struct hclge_dev *hdev = vport->back;
11007
11008 /* When the nic is down, the service task is not running and doesn't
11009  * update the port information every second. Query the port information
11010  * before returning the media type, to ensure correct media information.
11011  */
11012 hclge_update_port_info(hdev);
11013
11014 if (media_type)
11015 *media_type = hdev->hw.mac.media_type;
11016
11017 if (module_type)
11018 *module_type = hdev->hw.mac.module_type;
11019 }
11020
11021 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11022 u8 *tp_mdix_ctrl, u8 *tp_mdix)
11023 {
11024 struct hclge_vport *vport = hclge_get_vport(handle);
11025 struct hclge_dev *hdev = vport->back;
11026 struct phy_device *phydev = hdev->hw.mac.phydev;
11027 int mdix_ctrl, mdix, is_resolved;
11028 unsigned int retval;
11029
11030 if (!phydev) {
11031 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11032 *tp_mdix = ETH_TP_MDI_INVALID;
11033 return;
11034 }
11035
11036 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11037
11038 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11039 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11040 HCLGE_PHY_MDIX_CTRL_S);
11041
11042 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11043 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11044 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11045
11046 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11047
11048 switch (mdix_ctrl) {
11049 case 0x0:
11050 *tp_mdix_ctrl = ETH_TP_MDI;
11051 break;
11052 case 0x1:
11053 *tp_mdix_ctrl = ETH_TP_MDI_X;
11054 break;
11055 case 0x3:
11056 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11057 break;
11058 default:
11059 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11060 break;
11061 }
11062
11063 if (!is_resolved)
11064 *tp_mdix = ETH_TP_MDI_INVALID;
11065 else if (mdix)
11066 *tp_mdix = ETH_TP_MDI_X;
11067 else
11068 *tp_mdix = ETH_TP_MDI;
11069 }
11070
11071 static void hclge_info_show(struct hclge_dev *hdev)
11072 {
11073 struct hnae3_handle *handle = &hdev->vport->nic;
11074 struct device *dev = &hdev->pdev->dev;
11075
11076 dev_info(dev, "PF info begin:\n");
11077
11078 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11079 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11080 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11081 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11082 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11083 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11084 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11085 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11086 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11087 dev_info(dev, "This is %s PF\n",
11088 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11089 dev_info(dev, "DCB %s\n",
11090 handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
11091 dev_info(dev, "MQPRIO %s\n",
11092 handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
11093 dev_info(dev, "Default tx spare buffer size: %u\n",
11094 hdev->tx_spare_buf_size);
11095
11096 dev_info(dev, "PF info end.\n");
11097 }
11098
11099 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11100 struct hclge_vport *vport)
11101 {
11102 struct hnae3_client *client = vport->nic.client;
11103 struct hclge_dev *hdev = ae_dev->priv;
11104 int rst_cnt = hdev->rst_stats.reset_cnt;
11105 int ret;
11106
11107 ret = client->ops->init_instance(&vport->nic);
11108 if (ret)
11109 return ret;
11110
11111 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11112 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11113 rst_cnt != hdev->rst_stats.reset_cnt) {
11114 ret = -EBUSY;
11115 goto init_nic_err;
11116 }
11117
11118 /* Enable nic hw error interrupts */
11119 ret = hclge_config_nic_hw_error(hdev, true);
11120 if (ret) {
11121 dev_err(&ae_dev->pdev->dev,
11122 "fail(%d) to enable hw error interrupts\n", ret);
11123 goto init_nic_err;
11124 }
11125
11126 hnae3_set_client_init_flag(client, ae_dev, 1);
11127
11128 if (netif_msg_drv(&hdev->vport->nic))
11129 hclge_info_show(hdev);
11130
11131 return ret;
11132
11133 init_nic_err:
11134 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11135 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11136 msleep(HCLGE_WAIT_RESET_DONE);
11137
11138 client->ops->uninit_instance(&vport->nic, 0);
11139
11140 return ret;
11141 }
11142
11143 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11144 struct hclge_vport *vport)
11145 {
11146 struct hclge_dev *hdev = ae_dev->priv;
11147 struct hnae3_client *client;
11148 int rst_cnt;
11149 int ret;
11150
11151 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11152 !hdev->nic_client)
11153 return 0;
11154
11155 client = hdev->roce_client;
11156 ret = hclge_init_roce_base_info(vport);
11157 if (ret)
11158 return ret;
11159
11160 rst_cnt = hdev->rst_stats.reset_cnt;
11161 ret = client->ops->init_instance(&vport->roce);
11162 if (ret)
11163 return ret;
11164
11165 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11166 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11167 rst_cnt != hdev->rst_stats.reset_cnt) {
11168 ret = -EBUSY;
11169 goto init_roce_err;
11170 }
11171
11172 /* Enable roce ras interrupts */
11173 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11174 if (ret) {
11175 dev_err(&ae_dev->pdev->dev,
11176 "fail(%d) to enable roce ras interrupts\n", ret);
11177 goto init_roce_err;
11178 }
11179
11180 hnae3_set_client_init_flag(client, ae_dev, 1);
11181
11182 return 0;
11183
11184 init_roce_err:
11185 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11186 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11187 msleep(HCLGE_WAIT_RESET_DONE);
11188
11189 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11190
11191 return ret;
11192 }
11193
11194 static int hclge_init_client_instance(struct hnae3_client *client,
11195 struct hnae3_ae_dev *ae_dev)
11196 {
11197 struct hclge_dev *hdev = ae_dev->priv;
11198 struct hclge_vport *vport = &hdev->vport[0];
11199 int ret;
11200
11201 switch (client->type) {
11202 case HNAE3_CLIENT_KNIC:
11203 hdev->nic_client = client;
11204 vport->nic.client = client;
11205 ret = hclge_init_nic_client_instance(ae_dev, vport);
11206 if (ret)
11207 goto clear_nic;
11208
11209 ret = hclge_init_roce_client_instance(ae_dev, vport);
11210 if (ret)
11211 goto clear_roce;
11212
11213 break;
11214 case HNAE3_CLIENT_ROCE:
11215 if (hnae3_dev_roce_supported(hdev)) {
11216 hdev->roce_client = client;
11217 vport->roce.client = client;
11218 }
11219
11220 ret = hclge_init_roce_client_instance(ae_dev, vport);
11221 if (ret)
11222 goto clear_roce;
11223
11224 break;
11225 default:
11226 return -EINVAL;
11227 }
11228
11229 return 0;
11230
11231 clear_nic:
11232 hdev->nic_client = NULL;
11233 vport->nic.client = NULL;
11234 return ret;
11235 clear_roce:
11236 hdev->roce_client = NULL;
11237 vport->roce.client = NULL;
11238 return ret;
11239 }
11240
11241 static bool hclge_uninit_need_wait(struct hclge_dev *hdev)
11242 {
11243 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11244 test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
11245 }
11246
11247 static void hclge_uninit_client_instance(struct hnae3_client *client,
11248 struct hnae3_ae_dev *ae_dev)
11249 {
11250 struct hclge_dev *hdev = ae_dev->priv;
11251 struct hclge_vport *vport = &hdev->vport[0];
11252
11253 if (hdev->roce_client) {
11254 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11255 while (hclge_uninit_need_wait(hdev))
11256 msleep(HCLGE_WAIT_RESET_DONE);
11257
11258 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11259 hdev->roce_client = NULL;
11260 vport->roce.client = NULL;
11261 }
11262 if (client->type == HNAE3_CLIENT_ROCE)
11263 return;
11264 if (hdev->nic_client && client->ops->uninit_instance) {
11265 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11266 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11267 msleep(HCLGE_WAIT_RESET_DONE);
11268
11269 client->ops->uninit_instance(&vport->nic, 0);
11270 hdev->nic_client = NULL;
11271 vport->nic.client = NULL;
11272 }
11273 }
11274
11275 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11276 {
11277 struct pci_dev *pdev = hdev->pdev;
11278 struct hclge_hw *hw = &hdev->hw;
11279
11280 /* if the device does not have device memory, return directly */
11281 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11282 return 0;
11283
11284 hw->hw.mem_base =
11285 devm_ioremap_wc(&pdev->dev,
11286 pci_resource_start(pdev, HCLGE_MEM_BAR),
11287 pci_resource_len(pdev, HCLGE_MEM_BAR));
11288 if (!hw->hw.mem_base) {
11289 dev_err(&pdev->dev, "failed to map device memory\n");
11290 return -EFAULT;
11291 }
11292
11293 return 0;
11294 }
11295
11296 static int hclge_pci_init(struct hclge_dev *hdev)
11297 {
11298 struct pci_dev *pdev = hdev->pdev;
11299 struct hclge_hw *hw;
11300 int ret;
11301
11302 ret = pci_enable_device(pdev);
11303 if (ret) {
11304 dev_err(&pdev->dev, "failed to enable PCI device\n");
11305 return ret;
11306 }
11307
11308 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11309 if (ret) {
11310 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11311 if (ret) {
11312 dev_err(&pdev->dev,
11313 "can't set consistent PCI DMA");
11314 goto err_disable_device;
11315 }
11316 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11317 }
11318
11319 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11320 if (ret) {
11321 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11322 goto err_disable_device;
11323 }
11324
11325 pci_set_master(pdev);
11326 hw = &hdev->hw;
11327 hw->hw.io_base = pcim_iomap(pdev, 2, 0);
11328 if (!hw->hw.io_base) {
11329 dev_err(&pdev->dev, "Can't map configuration register space\n");
11330 ret = -ENOMEM;
11331 goto err_release_regions;
11332 }
11333
11334 ret = hclge_dev_mem_map(hdev);
11335 if (ret)
11336 goto err_unmap_io_base;
11337
11338 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11339
11340 return 0;
11341
11342 err_unmap_io_base:
11343 pcim_iounmap(pdev, hdev->hw.hw.io_base);
11344 err_release_regions:
11345 pci_release_regions(pdev);
11346 err_disable_device:
11347 pci_disable_device(pdev);
11348
11349 return ret;
11350 }
11351
11352 static void hclge_pci_uninit(struct hclge_dev *hdev)
11353 {
11354 struct pci_dev *pdev = hdev->pdev;
11355
11356 if (hdev->hw.hw.mem_base)
11357 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
11358
11359 pcim_iounmap(pdev, hdev->hw.hw.io_base);
11360 pci_free_irq_vectors(pdev);
11361 pci_release_mem_regions(pdev);
11362 pci_disable_device(pdev);
11363 }
11364
11365 static void hclge_state_init(struct hclge_dev *hdev)
11366 {
11367 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11368 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11369 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11370 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11371 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11372 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11373 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11374 }
11375
11376 static void hclge_state_uninit(struct hclge_dev *hdev)
11377 {
11378 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11379 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11380
11381 if (hdev->reset_timer.function)
11382 del_timer_sync(&hdev->reset_timer);
11383 if (hdev->service_task.work.func)
11384 cancel_delayed_work_sync(&hdev->service_task);
11385 }
11386
11387 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11388 enum hnae3_reset_type rst_type)
11389 {
11390 #define HCLGE_RESET_RETRY_WAIT_MS 500
11391 #define HCLGE_RESET_RETRY_CNT 5
11392
11393 struct hclge_dev *hdev = ae_dev->priv;
11394 int retry_cnt = 0;
11395 int ret;
11396
11397 while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11398 down(&hdev->reset_sem);
11399 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11400 hdev->reset_type = rst_type;
11401 ret = hclge_reset_prepare(hdev);
11402 if (!ret && !hdev->reset_pending)
11403 break;
11404
11405 dev_err(&hdev->pdev->dev,
11406 "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
11407 ret, hdev->reset_pending, retry_cnt);
11408 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11409 up(&hdev->reset_sem);
11410 msleep(HCLGE_RESET_RETRY_WAIT_MS);
11411 }
11412
11413 /* disable misc vector before reset done */
11414 hclge_enable_vector(&hdev->misc_vector, false);
11415 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
11416
11417 if (hdev->reset_type == HNAE3_FLR_RESET)
11418 hdev->rst_stats.flr_rst_cnt++;
11419 }
11420
11421 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11422 {
11423 struct hclge_dev *hdev = ae_dev->priv;
11424 int ret;
11425
11426 hclge_enable_vector(&hdev->misc_vector, true);
11427
11428 ret = hclge_reset_rebuild(hdev);
11429 if (ret)
11430 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11431
11432 hdev->reset_type = HNAE3_NONE_RESET;
11433 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11434 up(&hdev->reset_sem);
11435 }
11436
11437 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11438 {
11439 u16 i;
11440
11441 for (i = 0; i < hdev->num_alloc_vport; i++) {
11442 struct hclge_vport *vport = &hdev->vport[i];
11443 int ret;
11444
11445 /* Send cmd to clear vport's FUNC_RST_ING */
11446 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11447 if (ret)
11448 dev_warn(&hdev->pdev->dev,
11449 "clear vport(%u) rst failed %d!\n",
11450 vport->vport_id, ret);
11451 }
11452 }
11453
11454 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11455 {
11456 struct hclge_desc desc;
11457 int ret;
11458
11459 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11460
11461 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11462 /* This new command is only supported by new firmware; it will
11463  * fail with older firmware. The error value -EOPNOTSUPP can only
11464  * be returned by older firmware running this command, so to keep
11465  * the code backward compatible we override this value and return
11466  * success.
11467  */
11468 if (ret && ret != -EOPNOTSUPP) {
11469 dev_err(&hdev->pdev->dev,
11470 "failed to clear hw resource, ret = %d\n", ret);
11471 return ret;
11472 }
11473 return 0;
11474 }
11475
11476 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11477 {
11478 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11479 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11480 }
11481
11482 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11483 {
11484 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11485 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11486 }
11487
11488 static struct hclge_wol_info *hclge_get_wol_info(struct hnae3_handle *handle)
11489 {
11490 struct hclge_vport *vport = hclge_get_vport(handle);
11491
11492 return &vport->back->hw.mac.wol;
11493 }
11494
11495 static int hclge_get_wol_supported_mode(struct hclge_dev *hdev,
11496 u32 *wol_supported)
11497 {
11498 struct hclge_query_wol_supported_cmd *wol_supported_cmd;
11499 struct hclge_desc desc;
11500 int ret;
11501
11502 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_GET_SUPPORTED_MODE,
11503 true);
11504 wol_supported_cmd = (struct hclge_query_wol_supported_cmd *)desc.data;
11505
11506 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11507 if (ret) {
11508 dev_err(&hdev->pdev->dev,
11509 "failed to query wol supported, ret = %d\n", ret);
11510 return ret;
11511 }
11512
11513 *wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode);
11514
11515 return 0;
11516 }
11517
11518 static int hclge_set_wol_cfg(struct hclge_dev *hdev,
11519 struct hclge_wol_info *wol_info)
11520 {
11521 struct hclge_wol_cfg_cmd *wol_cfg_cmd;
11522 struct hclge_desc desc;
11523 int ret;
11524
11525 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_CFG, false);
11526 wol_cfg_cmd = (struct hclge_wol_cfg_cmd *)desc.data;
11527 wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode);
11528 wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size;
11529 memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX);
11530
11531 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11532 if (ret)
11533 dev_err(&hdev->pdev->dev,
11534 "failed to set wol config, ret = %d\n", ret);
11535
11536 return ret;
11537 }
11538
11539 static int hclge_update_wol(struct hclge_dev *hdev)
11540 {
11541 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
11542
11543 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
11544 return 0;
11545
11546 return hclge_set_wol_cfg(hdev, wol_info);
11547 }
11548
11549 static int hclge_init_wol(struct hclge_dev *hdev)
11550 {
11551 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
11552 int ret;
11553
11554 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
11555 return 0;
11556
11557 memset(wol_info, 0, sizeof(struct hclge_wol_info));
11558 ret = hclge_get_wol_supported_mode(hdev,
11559 &wol_info->wol_support_mode);
11560 if (ret) {
11561 wol_info->wol_support_mode = 0;
11562 return ret;
11563 }
11564
11565 return hclge_update_wol(hdev);
11566 }
11567
11568 static void hclge_get_wol(struct hnae3_handle *handle,
11569 struct ethtool_wolinfo *wol)
11570 {
11571 struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
11572
11573 wol->supported = wol_info->wol_support_mode;
11574 wol->wolopts = wol_info->wol_current_mode;
11575 if (wol_info->wol_current_mode & WAKE_MAGICSECURE)
11576 memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX);
11577 }
11578
11579 static int hclge_set_wol(struct hnae3_handle *handle,
11580 struct ethtool_wolinfo *wol)
11581 {
11582 struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
11583 struct hclge_vport *vport = hclge_get_vport(handle);
11584 u32 wol_mode;
11585 int ret;
11586
11587 wol_mode = wol->wolopts;
11588 if (wol_mode & ~wol_info->wol_support_mode)
11589 return -EINVAL;
11590
11591 wol_info->wol_current_mode = wol_mode;
11592 if (wol_mode & WAKE_MAGICSECURE) {
11593 memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX);
11594 wol_info->wol_sopass_size = SOPASS_MAX;
11595 } else {
11596 wol_info->wol_sopass_size = 0;
11597 }
11598
11599 ret = hclge_set_wol_cfg(vport->back, wol_info);
11600 if (ret)
11601 wol_info->wol_current_mode = 0;
11602
11603 return ret;
11604 }
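/* Editorial usage note (an assumption, not from the original source): this
 * pair of handlers typically backs ethtool's get/set WoL operations, e.g.
 *
 *   ethtool -s <netdev> wol g                        (magic packet)
 *   ethtool -s <netdev> wol gs sopass 00:11:22:33:44:55
 *
 * where the second form corresponds to WAKE_MAGICSECURE and supplies the
 * SecureOn password copied into wol_sopass above.
 */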
11605
11606 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11607 {
11608 struct pci_dev *pdev = ae_dev->pdev;
11609 struct hclge_dev *hdev;
11610 int ret;
11611
11612 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11613 if (!hdev)
11614 return -ENOMEM;
11615
11616 hdev->pdev = pdev;
11617 hdev->ae_dev = ae_dev;
11618 hdev->reset_type = HNAE3_NONE_RESET;
11619 hdev->reset_level = HNAE3_FUNC_RESET;
11620 ae_dev->priv = hdev;
11621
11622 /* HW supports 2 layers of vlan tags */
11623 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11624
11625 mutex_init(&hdev->vport_lock);
11626 spin_lock_init(&hdev->fd_rule_lock);
11627 sema_init(&hdev->reset_sem, 1);
11628
11629 ret = hclge_pci_init(hdev);
11630 if (ret)
11631 goto out;
11632
11633 /* Initialize the firmware command queue */
11634 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
11635 if (ret)
11636 goto err_pci_uninit;
11637
11638 /* Initialize the firmware command interface */
11639 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
11640 true, hdev->reset_pending);
11641 if (ret)
11642 goto err_cmd_uninit;
11643
11644 ret = hclge_clear_hw_resource(hdev);
11645 if (ret)
11646 goto err_cmd_uninit;
11647
11648 ret = hclge_get_cap(hdev);
11649 if (ret)
11650 goto err_cmd_uninit;
11651
11652 ret = hclge_query_dev_specs(hdev);
11653 if (ret) {
11654 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11655 ret);
11656 goto err_cmd_uninit;
11657 }
11658
11659 ret = hclge_configure(hdev);
11660 if (ret) {
11661 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11662 goto err_cmd_uninit;
11663 }
11664
11665 ret = hclge_init_msi(hdev);
11666 if (ret) {
11667 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11668 goto err_cmd_uninit;
11669 }
11670
11671 ret = hclge_misc_irq_init(hdev);
11672 if (ret)
11673 goto err_msi_uninit;
11674
11675 ret = hclge_alloc_tqps(hdev);
11676 if (ret) {
11677 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11678 goto err_msi_irq_uninit;
11679 }
11680
11681 ret = hclge_alloc_vport(hdev);
11682 if (ret)
11683 goto err_msi_irq_uninit;
11684
11685 ret = hclge_map_tqp(hdev);
11686 if (ret)
11687 goto err_msi_irq_uninit;
11688
11689 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
11690 clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
11691 if (hnae3_dev_phy_imp_supported(hdev))
11692 ret = hclge_update_tp_port_info(hdev);
11693 else
11694 ret = hclge_mac_mdio_config(hdev);
11695
11696 if (ret)
11697 goto err_msi_irq_uninit;
11698 }
11699
11700 ret = hclge_init_umv_space(hdev);
11701 if (ret)
11702 goto err_mdiobus_unreg;
11703
11704 ret = hclge_mac_init(hdev);
11705 if (ret) {
11706 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11707 goto err_mdiobus_unreg;
11708 }
11709
11710 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11711 if (ret) {
11712 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11713 goto err_mdiobus_unreg;
11714 }
11715
11716 ret = hclge_config_gro(hdev);
11717 if (ret)
11718 goto err_mdiobus_unreg;
11719
11720 ret = hclge_init_vlan_config(hdev);
11721 if (ret) {
11722 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11723 goto err_mdiobus_unreg;
11724 }
11725
11726 ret = hclge_tm_schd_init(hdev);
11727 if (ret) {
11728 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11729 goto err_mdiobus_unreg;
11730 }
11731
11732 ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
11733 &hdev->rss_cfg);
11734 if (ret) {
11735 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11736 goto err_mdiobus_unreg;
11737 }
11738
11739 ret = hclge_rss_init_hw(hdev);
11740 if (ret) {
11741 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11742 goto err_mdiobus_unreg;
11743 }
11744
11745 ret = init_mgr_tbl(hdev);
11746 if (ret) {
11747 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11748 goto err_mdiobus_unreg;
11749 }
11750
11751 ret = hclge_init_fd_config(hdev);
11752 if (ret) {
11753 dev_err(&pdev->dev,
11754 "fd table init fail, ret=%d\n", ret);
11755 goto err_mdiobus_unreg;
11756 }
11757
11758 ret = hclge_ptp_init(hdev);
11759 if (ret)
11760 goto err_mdiobus_unreg;
11761
11762 ret = hclge_update_port_info(hdev);
11763 if (ret)
11764 goto err_ptp_uninit;
11765
11766 INIT_KFIFO(hdev->mac_tnl_log);
11767
11768 hclge_dcb_ops_set(hdev);
11769
11770 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11771 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11772
11773 hclge_clear_all_event_cause(hdev);
11774 hclge_clear_resetting_state(hdev);
11775
11776 /* Log and clear the hw errors that have already occurred */
11777 if (hnae3_dev_ras_imp_supported(hdev))
11778 hclge_handle_occurred_error(hdev);
11779 else
11780 hclge_handle_all_hns_hw_errors(ae_dev);
11781
11782 /* request a delayed reset for the error recovery, because an immediate
11783  * global reset on a PF would affect the pending initialization of others
11784  */
11785 if (ae_dev->hw_err_reset_req) {
11786 enum hnae3_reset_type reset_level;
11787
11788 reset_level = hclge_get_reset_level(ae_dev,
11789 &ae_dev->hw_err_reset_req);
11790 hclge_set_def_reset_request(ae_dev, reset_level);
11791 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11792 }
11793
11794 hclge_init_rxd_adv_layout(hdev);
11795
11796 /* Enable MISC vector(vector0) */
11797 hclge_enable_vector(&hdev->misc_vector, true);
11798
11799 ret = hclge_init_wol(hdev);
11800 if (ret)
11801 dev_warn(&pdev->dev,
11802 "failed to wake on lan init, ret = %d\n", ret);
11803
11804 ret = hclge_devlink_init(hdev);
11805 if (ret)
11806 goto err_ptp_uninit;
11807
11808 hclge_state_init(hdev);
11809 hdev->last_reset_time = jiffies;
11810
11811 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11812 HCLGE_DRIVER_NAME);
11813
11814 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11815 return 0;
11816
11817 err_ptp_uninit:
11818 hclge_ptp_uninit(hdev);
11819 err_mdiobus_unreg:
11820 if (hdev->hw.mac.phydev)
11821 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11822 err_msi_irq_uninit:
11823 hclge_misc_irq_uninit(hdev);
11824 err_msi_uninit:
11825 pci_free_irq_vectors(pdev);
11826 err_cmd_uninit:
11827 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
11828 err_pci_uninit:
11829 pcim_iounmap(pdev, hdev->hw.hw.io_base);
11830 pci_release_regions(pdev);
11831 pci_disable_device(pdev);
11832 out:
11833 mutex_destroy(&hdev->vport_lock);
11834 return ret;
11835 }
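/* Editorial note (derived from the code above): the error labels unwind in
 * strict reverse order of initialization (ptp -> mdiobus -> misc irq ->
 * msi -> cmd -> pci), and the labels fall through, so each goto target
 * releases exactly the resources acquired before the failing step.
 */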
11836
11837 static void hclge_stats_clear(struct hclge_dev *hdev)
11838 {
11839 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11840 memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
11841 }
11842
11843 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11844 {
11845 return hclge_config_switch_param(hdev, vf, enable,
11846 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11847 }
11848
11849 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11850 {
11851 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11852 HCLGE_FILTER_FE_NIC_INGRESS_B,
11853 enable, vf);
11854 }
11855
hclge_set_vf_spoofchk_hw(struct hclge_dev * hdev,int vf,bool enable)11856 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11857 {
11858 int ret;
11859
11860 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11861 if (ret) {
11862 dev_err(&hdev->pdev->dev,
11863 "Set vf %d mac spoof check %s failed, ret=%d\n",
11864 vf, enable ? "on" : "off", ret);
11865 return ret;
11866 }
11867
11868 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11869 if (ret)
11870 dev_err(&hdev->pdev->dev,
11871 "Set vf %d vlan spoof check %s failed, ret=%d\n",
11872 vf, enable ? "on" : "off", ret);
11873
11874 return ret;
11875 }
11876
hclge_set_vf_spoofchk(struct hnae3_handle * handle,int vf,bool enable)11877 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11878 bool enable)
11879 {
11880 struct hclge_vport *vport = hclge_get_vport(handle);
11881 struct hclge_dev *hdev = vport->back;
11882 u32 new_spoofchk = enable ? 1 : 0;
11883 int ret;
11884
11885 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11886 return -EOPNOTSUPP;
11887
11888 vport = hclge_get_vf_vport(hdev, vf);
11889 if (!vport)
11890 return -EINVAL;
11891
11892 if (vport->vf_info.spoofchk == new_spoofchk)
11893 return 0;
11894
11895 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11896 dev_warn(&hdev->pdev->dev,
11897 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11898 vf);
11899 else if (enable && hclge_is_umv_space_full(vport, true))
11900 dev_warn(&hdev->pdev->dev,
11901 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11902 vf);
11903
11904 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11905 if (ret)
11906 return ret;
11907
11908 vport->vf_info.spoofchk = new_spoofchk;
11909 return 0;
11910 }
11911
hclge_reset_vport_spoofchk(struct hclge_dev * hdev)11912 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11913 {
11914 struct hclge_vport *vport = hdev->vport;
11915 int ret;
11916 int i;
11917
11918 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11919 return 0;
11920
11921 /* resume the vf spoof check state after reset */
11922 for (i = 0; i < hdev->num_alloc_vport; i++) {
11923 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11924 vport->vf_info.spoofchk);
11925 if (ret)
11926 return ret;
11927
11928 vport++;
11929 }
11930
11931 return 0;
11932 }
11933
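/* A VF marked as trusted may request unicast/multicast promiscuous mode.
 * Changing the trusted flag only records the new state and marks the vport
 * for a promiscuous-mode update; the hardware reconfiguration is done
 * asynchronously by the service task (see hclge_sync_vport_promisc_mode()
 * below).
 */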
static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_trusted = enable ? 1 : 0;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.trusted == new_trusted)
		return 0;

	vport->vf_info.trusted = new_trusted;
	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
	hclge_task_schedule(hdev, 0);

	return 0;
}

static void hclge_reset_vf_rate(struct hclge_dev *hdev)
{
	int ret;
	int vf;

	/* reset vf rate to default value */
	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
		struct hclge_vport *vport = &hdev->vport[vf];

		vport->vf_info.max_tx_rate = 0;
		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"vf%d failed to reset to default, ret=%d\n",
				vf - HCLGE_VF_VPORT_START_NUM, ret);
	}
}

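/* The hardware only supports a maximum TX rate shaper per VF, so any
 * non-zero min_tx_rate is rejected; max_tx_rate must lie between 0 (no
 * limit) and the MAC's maximum speed.
 */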
static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
				     int min_tx_rate, int max_tx_rate)
{
	if (min_tx_rate != 0 ||
	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
		dev_err(&hdev->pdev->dev,
			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
		return -EINVAL;
	}

	return 0;
}

static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
			     int min_tx_rate, int max_tx_rate, bool force)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
	if (ret)
		return ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
		return 0;

	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
	if (ret)
		return ret;

	vport->vf_info.max_tx_rate = max_tx_rate;

	return 0;
}

static int hclge_resume_vf_rate(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport->nic;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* resume the vf max_tx_rate after reset */
	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
		vport = hclge_get_vf_vport(hdev, vf);
		if (!vport)
			return -EINVAL;

		/* zero means max rate; after reset the firmware has already
		 * set it to max rate, so just continue.
		 */
		if (!vport->vf_info.max_tx_rate)
			continue;

		ret = hclge_set_vf_rate(handle, vf, 0,
					vport->vf_info.max_tx_rate, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vf%d failed to resume tx_rate:%u, ret=%d\n",
				vf, vport->vf_info.max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
		vport++;
	}
}

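/* Reinitialize the hardware state of the PF after a reset. Only an IMP or
 * global reset invalidates the hardware VLAN/MAC tables, so only those reset
 * types clear the shadow tables in memory and mark every vport's
 * configuration as needing to be restored; other reset types (e.g. a PF
 * reset) keep them.
 */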
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	/* NOTE: a PF reset does not need to clear or restore the PF and VF
	 * table entries, so do not clean the tables in memory here.
	 */
	if (hdev->reset_type == HNAE3_IMP_RESET ||
	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
		hclge_reset_umv_space(hdev);
	}

	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
				  true, hdev->reset_pending);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_tp_port_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
			ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to reinit manager table, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
		return ret;
	}

	ret = hclge_ptp_init(hdev);
	if (ret)
		return ret;

	/* Log and clear the hw errors that have already occurred */
	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_occurred_error(hdev);
	else
		hclge_handle_all_hns_hw_errors(ae_dev);

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on global reset.
	 */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable NIC hw error interrupts\n",
			ret);
		return ret;
	}

	if (hdev->roce_client) {
		ret = hclge_config_rocee_ras_interrupt(hdev, true);
		if (ret) {
			dev_err(&pdev->dev,
				"fail(%d) to re-enable roce ras interrupts\n",
				ret);
			return ret;
		}
	}

	hclge_reset_vport_state(hdev);
	ret = hclge_reset_vport_spoofchk(hdev);
	if (ret)
		return ret;

	ret = hclge_resume_vf_rate(hdev);
	if (ret)
		return ret;

	hclge_init_rxd_adv_layout(hdev);

	ret = hclge_update_wol(hdev);
	if (ret)
		dev_warn(&pdev->dev,
			 "failed to update wol config, ret = %d\n", ret);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

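/* Tear down in roughly the reverse order of initialization: stop software
 * state and timers first, then detach the PHY, mask the misc vector and all
 * hardware error interrupts, and finally release the command queue, IRQ and
 * PCI resources.
 */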
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_reset_vf_rate(hdev);
	hclge_clear_vf_vlan(hdev);
	hclge_state_uninit(hdev);
	hclge_ptp_uninit(hdev);
	hclge_uninit_rxd_adv_layout(hdev);
	hclge_uninit_mac_table(hdev);
	hclge_del_all_fd_entries(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
	hclge_config_mac_tnl_int(hdev, false);
	hclge_config_nic_hw_error(hdev, false);
	hclge_config_rocee_ras_interrupt(hdev, false);

	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
	hclge_misc_irq_uninit(hdev);
	hclge_devlink_uninit(hdev);
	hclge_pci_uninit(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_lock);
	ae_dev->priv = NULL;
}

static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->pf_rss_size_max;
}

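/* The per-TC queue count programmed into hardware is encoded here as its
 * log2, so it is first rounded up to a power of two. For example, with
 * rss_size = 6: roundup_pow_of_two(6) = 8 and ilog2(8) = 3, so tc_size = 3,
 * while each enabled TC starts at tc_offset = rss_size * i.
 */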
static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	unsigned int i;

	roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = vport->nic.kinfo.rss_size * i;
	}

	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
					  tc_size);
}

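/* Changing the channel count is a three-step sequence: update the TM vport
 * mapping for the requested queue count, reprogram the RSS TC mode, and then
 * rebuild the RSS indirection table for the new rss_size - unless the user
 * has configured the table explicitly, in which case it is left untouched.
 */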
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_set_rss_tc_mode_cfg(handle);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitialize the RSS indirection table according to the new RSS size */
	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
			    GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);

	return ret;
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

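/* Back ethtool's "identify" (ethtool -p) operation: ETHTOOL_ID_ACTIVE turns
 * the locate LED on and ETHTOOL_ID_INACTIVE turns it off again; the on/off
 * blink states are not used, so any other request is rejected with -EINVAL.
 */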
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool gro_en_old = hdev->gro_en;
	int ret;

	hdev->gro_en = enable;
	ret = hclge_config_gro(hdev);
	if (ret)
		hdev->gro_en = gro_en_old;

	return ret;
}

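/* Push the vport's pending promiscuous-mode request to hardware. For the PF
 * (vport 0) the request comes from the netdev flags; for a VF it comes from
 * the VF's requests, where unicast/multicast promiscuous mode is honoured
 * only if the VF is trusted (broadcast is always honoured). On failure the
 * change bit is set again so the service task will retry.
 */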
static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport)
{
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	bool uc_en = false;
	bool mc_en = false;
	u8 tmp_flags;
	bool bc_en;
	int ret;

	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
		vport->last_promisc_flags = vport->overflow_promisc_flags;
	}

	if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
				&vport->state))
		return 0;

	/* for PF */
	if (!vport->vport_id) {
		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
					     tmp_flags & HNAE3_MPE);
		if (!ret)
			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
				&vport->state);
		else
			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
				&vport->state);
		return ret;
	}

	/* for VF */
	if (vport->vf_info.trusted) {
		uc_en = vport->vf_info.request_uc_en > 0 ||
			vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE;
		mc_en = vport->vf_info.request_mc_en > 0 ||
			vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE;
	}
	bc_en = vport->vf_info.request_bc_en > 0;

	ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
					 mc_en, bc_en);
	if (ret) {
		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
		return ret;
	}
	hclge_set_vport_vlan_fltr_change(vport);

	return 0;
}

static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int ret;
	u16 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];

		ret = hclge_sync_vport_promisc_mode(vport);
		if (ret)
			return;
	}
}

static bool hclge_module_existed(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u32 existed;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP exist state, ret = %d\n", ret);
		return false;
	}

	existed = le32_to_cpu(desc.data[0]);

	return existed != 0;
}

/* Reading the module EEPROM needs 6 BDs (140 bytes in total) per command.
 * Return the number of bytes actually read; 0 means the read failed.
 */
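/* BD0 carries the request (offset and read length) plus the first
 * HCLGE_SFP_INFO_BD0_LEN bytes of data; BD1~BD5 each carry up to
 * HCLGE_SFP_INFO_BDX_LEN bytes, which together give the 140-byte maximum.
 */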
static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
				     u32 len, u8 *data)
{
	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
	u16 read_len;
	u16 copy_len;
	int ret;
	int i;

	/* setup all 6 bds to read module eeprom info. */
	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
					   true);

		/* bd0~bd4 need next flag */
		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}

	/* setup bd0, this bd contains offset and read length. */
	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = cpu_to_le16(read_len);

	ret = hclge_cmd_send(&hdev->hw, desc, i);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP eeprom info, ret = %d\n", ret);
		return 0;
	}

	/* copy sfp info from bd0 to out buffer. */
	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	/* copy sfp info from bd1~bd5 to out buffer if needed. */
	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			return read_len;

		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return read_len;
}

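/* ethtool module EEPROM read: only fiber ports can have a pluggable module,
 * and the module must be present. Requests larger than one firmware command
 * can return (HCLGE_SFP_INFO_MAX_LEN bytes) are satisfied by looping until
 * the full length has been read.
 */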
static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
				   u32 len, u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 read_len = 0;
	u16 data_len;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	if (!hclge_module_existed(hdev))
		return -ENXIO;

	while (read_len < len) {
		data_len = hclge_get_sfp_eeprom_info(hdev,
						     offset + read_len,
						     len - read_len,
						     data + read_len);
		if (!data_len)
			return -EIO;

		read_len += data_len;
	}

	return 0;
}

static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
					 u32 *status_code)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query link diagnosis info, ret = %d\n", ret);
		return ret;
	}

	*status_code = le32_to_cpu(desc.data[0]);
	return 0;
}

/* After SR-IOV is disabled, the VF still holds some configuration and info
 * that was set up by the PF and needs to be cleaned.
 */
static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	int ret;

	clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->need_notify = 0;
	vport->mps = 0;

	/* after disable sriov, clean VF rate configured by PF */
	ret = hclge_tm_qs_shaper_cfg(vport, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d rate config, ret = %d\n",
			vfid, ret);

	vlan_info.vlan_tag = 0;
	vlan_info.qos = 0;
	vlan_info.vlan_proto = ETH_P_8021Q;
	ret = hclge_update_port_base_vlan_cfg(vport,
					      HNAE3_PORT_BASE_VLAN_DISABLE,
					      &vlan_info);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d port base vlan, ret = %d\n",
			vfid, ret);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d spoof config, ret = %d\n",
			vfid, ret);

	memset(&vport->vf_info, 0, sizeof(vport->vf_info));
}

static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < num_vfs; i++) {
		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];

		hclge_clear_vport_vf_info(vport, i);
	}
}

static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
			       u8 *priority)
{
	struct hclge_vport *vport = hclge_get_vport(h);

	if (dscp >= HNAE3_MAX_DSCP)
		return -EINVAL;

	if (tc_mode)
		*tc_mode = vport->nic.kinfo.tc_map_mode;
	if (priority)
		*priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
			    vport->nic.kinfo.dscp_prio[dscp];

	return 0;
}

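/* Operation table exposed to the hnae3 framework; upper-layer client code
 * reaches this PF implementation through these callbacks.
 */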
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.reset_prepare = hclge_reset_prepare_general,
	.reset_done = hclge_reset_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.request_update_promisc_mode = hclge_request_update_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec_stats = hclge_get_fec_stats,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_comm_get_rss_key_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_read_cmd = hclge_dbg_read_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
	.get_module_eeprom = hclge_get_module_eeprom,
	.get_cmdq_stat = hclge_get_cmdq_stat,
	.add_cls_flower = hclge_add_cls_flower,
	.del_cls_flower = hclge_del_cls_flower,
	.cls_flower_active = hclge_is_cls_flower_active,
	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
	.set_tx_hwts_info = hclge_ptp_set_tx_info,
	.get_rx_hwts = hclge_ptp_get_rx_hwts,
	.get_ts_info = hclge_ptp_get_ts_info,
	.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
	.clean_vf_config = hclge_clean_vport_config,
	.get_dscp_prio = hclge_get_dscp_prio,
	.get_wol = hclge_get_wol,
	.set_wol = hclge_set_wol,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

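/* Module entry: create the dedicated unbound workqueue used by the service
 * tasks, then register this algorithm with the hnae3 framework so that
 * matching devices from ae_algo_pci_tbl can be initialized.
 */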
static int __init hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void __exit hclge_exit(void)
{
	hnae3_unregister_ae_algo_prepare(&ae_algo);
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);