/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))

static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_update_led_status(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac Loopback test",
	"Serdes Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};

static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static void
hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}

static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_get_traffic_stats(struct hclge_dev *hdev)
{
	struct hclge_mac_stats *mac_stats = &hdev->hw_stats.mac_stats;
	struct hclge_desc desc;
	__le64 *desc_data;
	int ret;

	/* for fiber port, need to query the total rx/tx packet statistics,
	 * used for data transfer checking.
	 */
	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return 0;

	if (test_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_STATS_MAC_TRAFFIC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC total pkt stats fail, ret = %d\n", ret);

		return ret;
	}

	desc_data = (__le64 *)(&desc.data[0]);
	mac_stats->mac_tx_total_pkt_num += le64_to_cpu(*desc_data++);
	mac_stats->mac_rx_total_pkt_num += le64_to_cpu(*desc_data);

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
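		/* Rx ring counters follow the Tx ring counters in the
		 * ethtool stats buffer, matching the string order built in
		 * hclge_tqps_get_strings().
		 */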
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		} else {
			count = -EOPNOTSUPP;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static
void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);

			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					     HCLGE_CFG_VMDQ_M,
					     HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					   HCLGE_CFG_TQP_DESC_N_M,
					   HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
				       HCLGE_CFG_PHY_ADDR_M,
				       HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_MEDIA_TP_M,
					 HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_RX_BUF_LEN_M,
					 HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_MAC_ADDR_H_M,
					   HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_DEFAULT_SPEED_M,
					    HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_RSS_SIZE_M,
					   HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_M,
					    HCLGE_CFG_SPEED_ABILITY_S);
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled out
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Length is given in units of 4 bytes when sent to hardware */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);
	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Currently does not support non-contiguous TCs */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

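	/* Use TC-based transmit scheduling by default */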
	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
		return -EINVAL;
	}

	return 0;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* Tx buffer size is allocated in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"tx buffer alloc failed %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
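	/* Check whether the remaining rx packet buffer (rx_all) can hold the
	 * allocated private buffers plus the shared buffer needed by the
	 * enabled TCs; if so, record the shared buffer size and thresholds.
	 */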
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the number of pfc disabled TCs that have a private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last one be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last one be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TCs with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}
	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
		return ret;
	}
	return 0;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC,
false); 1933 req = (struct hclge_rx_com_thrd *)&desc[i].data; 1934 1935 /* The first descriptor set the NEXT bit to 1 */ 1936 if (i == 0) 1937 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1938 else 1939 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1940 1941 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 1942 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; 1943 1944 req->com_thrd[j].high = 1945 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); 1946 req->com_thrd[j].high |= 1947 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) << 1948 HCLGE_RX_PRIV_EN_B); 1949 req->com_thrd[j].low = 1950 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); 1951 req->com_thrd[j].low |= 1952 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) << 1953 HCLGE_RX_PRIV_EN_B); 1954 } 1955 } 1956 1957 /* Send 2 descriptors at one time */ 1958 ret = hclge_cmd_send(&hdev->hw, desc, 2); 1959 if (ret) { 1960 dev_err(&hdev->pdev->dev, 1961 "common threshold config cmd failed %d\n", ret); 1962 return ret; 1963 } 1964 return 0; 1965 } 1966 1967 static int hclge_common_wl_config(struct hclge_dev *hdev, 1968 struct hclge_pkt_buf_alloc *buf_alloc) 1969 { 1970 struct hclge_shared_buf *buf = &buf_alloc->s_buf; 1971 struct hclge_rx_com_wl *req; 1972 struct hclge_desc desc; 1973 int ret; 1974 1975 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); 1976 1977 req = (struct hclge_rx_com_wl *)desc.data; 1978 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); 1979 req->com_wl.high |= 1980 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) << 1981 HCLGE_RX_PRIV_EN_B); 1982 1983 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); 1984 req->com_wl.low |= 1985 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) << 1986 HCLGE_RX_PRIV_EN_B); 1987 1988 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1989 if (ret) { 1990 dev_err(&hdev->pdev->dev, 1991 "common waterline config cmd failed %d\n", ret); 1992 return ret; 1993 } 1994 1995 return 0; 1996 } 1997 1998 int hclge_buffer_alloc(struct hclge_dev *hdev) 1999 { 2000 struct hclge_pkt_buf_alloc *pkt_buf; 2001 int ret; 2002 2003 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); 2004 if (!pkt_buf) 2005 return -ENOMEM; 2006 2007 ret = hclge_tx_buffer_calc(hdev, pkt_buf); 2008 if (ret) { 2009 dev_err(&hdev->pdev->dev, 2010 "could not calc tx buffer size for all TCs %d\n", ret); 2011 goto out; 2012 } 2013 2014 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); 2015 if (ret) { 2016 dev_err(&hdev->pdev->dev, 2017 "could not alloc tx buffers %d\n", ret); 2018 goto out; 2019 } 2020 2021 ret = hclge_rx_buffer_calc(hdev, pkt_buf); 2022 if (ret) { 2023 dev_err(&hdev->pdev->dev, 2024 "could not calc rx priv buffer size for all TCs %d\n", 2025 ret); 2026 goto out; 2027 } 2028 2029 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); 2030 if (ret) { 2031 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", 2032 ret); 2033 goto out; 2034 } 2035 2036 if (hnae3_dev_dcb_supported(hdev)) { 2037 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); 2038 if (ret) { 2039 dev_err(&hdev->pdev->dev, 2040 "could not configure rx private waterline %d\n", 2041 ret); 2042 goto out; 2043 } 2044 2045 ret = hclge_common_thrd_config(hdev, pkt_buf); 2046 if (ret) { 2047 dev_err(&hdev->pdev->dev, 2048 "could not configure common threshold %d\n", 2049 ret); 2050 goto out; 2051 } 2052 } 2053 2054 ret = hclge_common_wl_config(hdev, pkt_buf); 2055 if (ret) 2056 dev_err(&hdev->pdev->dev, 2057 "could not configure common waterline %d\n", ret); 2058 2059 out: 2060 kfree(pkt_buf); 2061 return ret; 2062 } 2063 2064 static int 
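/* hclge_init_roce_base_info - prepare the RoCE client's hnae3 handle.
 *
 * Copies the vector budget reserved for RoCE (num_roce_msi and
 * roce_base_vector) into the roce handle, shares the PF's netdev,
 * io_base, pdev, ae_algo and NUMA node mask with it, and fails with
 * -EINVAL when fewer MSI vectors are left than the RoCE client needs.
 */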
hclge_init_roce_base_info(struct hclge_vport *vport) 2065 { 2066 struct hnae3_handle *roce = &vport->roce; 2067 struct hnae3_handle *nic = &vport->nic; 2068 2069 roce->rinfo.num_vectors = vport->back->num_roce_msi; 2070 2071 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || 2072 vport->back->num_msi_left == 0) 2073 return -EINVAL; 2074 2075 roce->rinfo.base_vector = vport->back->roce_base_vector; 2076 2077 roce->rinfo.netdev = nic->kinfo.netdev; 2078 roce->rinfo.roce_io_base = vport->back->hw.io_base; 2079 2080 roce->pdev = nic->pdev; 2081 roce->ae_algo = nic->ae_algo; 2082 roce->numa_node_mask = nic->numa_node_mask; 2083 2084 return 0; 2085 } 2086 2087 static int hclge_init_msi(struct hclge_dev *hdev) 2088 { 2089 struct pci_dev *pdev = hdev->pdev; 2090 int vectors; 2091 int i; 2092 2093 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 2094 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2095 if (vectors < 0) { 2096 dev_err(&pdev->dev, 2097 "failed(%d) to allocate MSI/MSI-X vectors\n", 2098 vectors); 2099 return vectors; 2100 } 2101 if (vectors < hdev->num_msi) 2102 dev_warn(&hdev->pdev->dev, 2103 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2104 hdev->num_msi, vectors); 2105 2106 hdev->num_msi = vectors; 2107 hdev->num_msi_left = vectors; 2108 hdev->base_msi_vector = pdev->irq; 2109 hdev->roce_base_vector = hdev->base_msi_vector + 2110 HCLGE_ROCE_VECTOR_OFFSET; 2111 2112 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2113 sizeof(u16), GFP_KERNEL); 2114 if (!hdev->vector_status) { 2115 pci_free_irq_vectors(pdev); 2116 return -ENOMEM; 2117 } 2118 2119 for (i = 0; i < hdev->num_msi; i++) 2120 hdev->vector_status[i] = HCLGE_INVALID_VPORT; 2121 2122 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2123 sizeof(int), GFP_KERNEL); 2124 if (!hdev->vector_irq) { 2125 pci_free_irq_vectors(pdev); 2126 return -ENOMEM; 2127 } 2128 2129 return 0; 2130 } 2131 2132 static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) 2133 { 2134 struct hclge_mac *mac = &hdev->hw.mac; 2135 2136 if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M)) 2137 mac->duplex = (u8)duplex; 2138 else 2139 mac->duplex = HCLGE_MAC_FULL; 2140 2141 mac->speed = speed; 2142 } 2143 2144 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) 2145 { 2146 struct hclge_config_mac_speed_dup_cmd *req; 2147 struct hclge_desc desc; 2148 int ret; 2149 2150 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; 2151 2152 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); 2153 2154 hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); 2155 2156 switch (speed) { 2157 case HCLGE_MAC_SPEED_10M: 2158 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2159 HCLGE_CFG_SPEED_S, 6); 2160 break; 2161 case HCLGE_MAC_SPEED_100M: 2162 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2163 HCLGE_CFG_SPEED_S, 7); 2164 break; 2165 case HCLGE_MAC_SPEED_1G: 2166 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2167 HCLGE_CFG_SPEED_S, 0); 2168 break; 2169 case HCLGE_MAC_SPEED_10G: 2170 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2171 HCLGE_CFG_SPEED_S, 1); 2172 break; 2173 case HCLGE_MAC_SPEED_25G: 2174 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2175 HCLGE_CFG_SPEED_S, 2); 2176 break; 2177 case HCLGE_MAC_SPEED_40G: 2178 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2179 HCLGE_CFG_SPEED_S, 3); 2180 break; 2181 case HCLGE_MAC_SPEED_50G: 2182 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2183 HCLGE_CFG_SPEED_S, 4); 2184 
break; 2185 case HCLGE_MAC_SPEED_100G: 2186 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2187 HCLGE_CFG_SPEED_S, 5); 2188 break; 2189 default: 2190 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); 2191 return -EINVAL; 2192 } 2193 2194 hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, 2195 1); 2196 2197 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2198 if (ret) { 2199 dev_err(&hdev->pdev->dev, 2200 "mac speed/duplex config cmd failed %d.\n", ret); 2201 return ret; 2202 } 2203 2204 hclge_check_speed_dup(hdev, duplex, speed); 2205 2206 return 0; 2207 } 2208 2209 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, 2210 u8 duplex) 2211 { 2212 struct hclge_vport *vport = hclge_get_vport(handle); 2213 struct hclge_dev *hdev = vport->back; 2214 2215 return hclge_cfg_mac_speed_dup(hdev, speed, duplex); 2216 } 2217 2218 static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, 2219 u8 *duplex) 2220 { 2221 struct hclge_query_an_speed_dup_cmd *req; 2222 struct hclge_desc desc; 2223 int speed_tmp; 2224 int ret; 2225 2226 req = (struct hclge_query_an_speed_dup_cmd *)desc.data; 2227 2228 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); 2229 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2230 if (ret) { 2231 dev_err(&hdev->pdev->dev, 2232 "mac speed/autoneg/duplex query cmd failed %d\n", 2233 ret); 2234 return ret; 2235 } 2236 2237 *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); 2238 speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, 2239 HCLGE_QUERY_SPEED_S); 2240 2241 ret = hclge_parse_speed(speed_tmp, speed); 2242 if (ret) { 2243 dev_err(&hdev->pdev->dev, 2244 "could not parse speed(=%d), %d\n", speed_tmp, ret); 2245 return -EIO; 2246 } 2247 2248 return 0; 2249 } 2250 2251 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) 2252 { 2253 struct hclge_config_auto_neg_cmd *req; 2254 struct hclge_desc desc; 2255 u32 flag = 0; 2256 int ret; 2257 2258 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); 2259 2260 req = (struct hclge_config_auto_neg_cmd *)desc.data; 2261 hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); 2262 req->cfg_an_cmd_flag = cpu_to_le32(flag); 2263 2264 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2265 if (ret) { 2266 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", 2267 ret); 2268 return ret; 2269 } 2270 2271 return 0; 2272 } 2273 2274 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) 2275 { 2276 struct hclge_vport *vport = hclge_get_vport(handle); 2277 struct hclge_dev *hdev = vport->back; 2278 2279 return hclge_set_autoneg_en(hdev, enable); 2280 } 2281 2282 static int hclge_get_autoneg(struct hnae3_handle *handle) 2283 { 2284 struct hclge_vport *vport = hclge_get_vport(handle); 2285 struct hclge_dev *hdev = vport->back; 2286 struct phy_device *phydev = hdev->hw.mac.phydev; 2287 2288 if (phydev) 2289 return phydev->autoneg; 2290 2291 return hdev->hw.mac.autoneg; 2292 } 2293 2294 static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, 2295 bool mask_vlan, 2296 u8 *mac_mask) 2297 { 2298 struct hclge_mac_vlan_mask_entry_cmd *req; 2299 struct hclge_desc desc; 2300 int status; 2301 2302 req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; 2303 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); 2304 2305 hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, 2306 mask_vlan ? 
1 : 0); 2307 ether_addr_copy(req->mac_mask, mac_mask); 2308 2309 status = hclge_cmd_send(&hdev->hw, &desc, 1); 2310 if (status) 2311 dev_err(&hdev->pdev->dev, 2312 "Config mac_vlan_mask failed for cmd_send, ret =%d\n", 2313 status); 2314 2315 return status; 2316 } 2317 2318 static int hclge_mac_init(struct hclge_dev *hdev) 2319 { 2320 struct hnae3_handle *handle = &hdev->vport[0].nic; 2321 struct net_device *netdev = handle->kinfo.netdev; 2322 struct hclge_mac *mac = &hdev->hw.mac; 2323 u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 2324 int mtu; 2325 int ret; 2326 2327 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); 2328 if (ret) { 2329 dev_err(&hdev->pdev->dev, 2330 "Config mac speed dup fail ret=%d\n", ret); 2331 return ret; 2332 } 2333 2334 mac->link = 0; 2335 2336 /* Initialize the MTA table work mode */ 2337 hdev->accept_mta_mc = true; 2338 hdev->enable_mta = true; 2339 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; 2340 2341 ret = hclge_set_mta_filter_mode(hdev, 2342 hdev->mta_mac_sel_type, 2343 hdev->enable_mta); 2344 if (ret) { 2345 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n", 2346 ret); 2347 return ret; 2348 } 2349 2350 ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc); 2351 if (ret) { 2352 dev_err(&hdev->pdev->dev, 2353 "set mta filter mode fail ret=%d\n", ret); 2354 return ret; 2355 } 2356 2357 ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); 2358 if (ret) { 2359 dev_err(&hdev->pdev->dev, 2360 "set default mac_vlan_mask fail ret=%d\n", ret); 2361 return ret; 2362 } 2363 2364 if (netdev) 2365 mtu = netdev->mtu; 2366 else 2367 mtu = ETH_DATA_LEN; 2368 2369 ret = hclge_set_mtu(handle, mtu); 2370 if (ret) { 2371 dev_err(&hdev->pdev->dev, 2372 "set mtu failed ret=%d\n", ret); 2373 return ret; 2374 } 2375 2376 return 0; 2377 } 2378 2379 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) 2380 { 2381 if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2382 schedule_work(&hdev->mbx_service_task); 2383 } 2384 2385 static void hclge_reset_task_schedule(struct hclge_dev *hdev) 2386 { 2387 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) 2388 schedule_work(&hdev->rst_service_task); 2389 } 2390 2391 static void hclge_task_schedule(struct hclge_dev *hdev) 2392 { 2393 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && 2394 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2395 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) 2396 (void)schedule_work(&hdev->service_task); 2397 } 2398 2399 static int hclge_get_mac_link_status(struct hclge_dev *hdev) 2400 { 2401 struct hclge_link_status_cmd *req; 2402 struct hclge_desc desc; 2403 int link_status; 2404 int ret; 2405 2406 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); 2407 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2408 if (ret) { 2409 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", 2410 ret); 2411 return ret; 2412 } 2413 2414 req = (struct hclge_link_status_cmd *)desc.data; 2415 link_status = req->status & HCLGE_LINK_STATUS; 2416 2417 return !!link_status; 2418 } 2419 2420 static int hclge_get_mac_phy_link(struct hclge_dev *hdev) 2421 { 2422 int mac_state; 2423 int link_stat; 2424 2425 mac_state = hclge_get_mac_link_status(hdev); 2426 2427 if (hdev->hw.mac.phydev) { 2428 if (!genphy_read_status(hdev->hw.mac.phydev)) 2429 link_stat = mac_state & 2430 hdev->hw.mac.phydev->link; 2431 else 2432 link_stat = 0; 2433 2434 } else { 2435 link_stat = mac_state; 2436 } 2437 2438 return 
!!link_stat; 2439 } 2440 2441 static void hclge_update_link_status(struct hclge_dev *hdev) 2442 { 2443 struct hnae3_client *client = hdev->nic_client; 2444 struct hnae3_handle *handle; 2445 int state; 2446 int i; 2447 2448 if (!client) 2449 return; 2450 state = hclge_get_mac_phy_link(hdev); 2451 if (state != hdev->hw.mac.link) { 2452 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2453 handle = &hdev->vport[i].nic; 2454 client->ops->link_status_change(handle, state); 2455 } 2456 hdev->hw.mac.link = state; 2457 } 2458 } 2459 2460 static int hclge_update_speed_duplex(struct hclge_dev *hdev) 2461 { 2462 struct hclge_mac mac = hdev->hw.mac; 2463 u8 duplex; 2464 int speed; 2465 int ret; 2466 2467 /* get the speed and duplex as autoneg'result from mac cmd when phy 2468 * doesn't exit. 2469 */ 2470 if (mac.phydev || !mac.autoneg) 2471 return 0; 2472 2473 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); 2474 if (ret) { 2475 dev_err(&hdev->pdev->dev, 2476 "mac autoneg/speed/duplex query failed %d\n", ret); 2477 return ret; 2478 } 2479 2480 if ((mac.speed != speed) || (mac.duplex != duplex)) { 2481 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); 2482 if (ret) { 2483 dev_err(&hdev->pdev->dev, 2484 "mac speed/duplex config failed %d\n", ret); 2485 return ret; 2486 } 2487 } 2488 2489 return 0; 2490 } 2491 2492 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) 2493 { 2494 struct hclge_vport *vport = hclge_get_vport(handle); 2495 struct hclge_dev *hdev = vport->back; 2496 2497 return hclge_update_speed_duplex(hdev); 2498 } 2499 2500 static int hclge_get_status(struct hnae3_handle *handle) 2501 { 2502 struct hclge_vport *vport = hclge_get_vport(handle); 2503 struct hclge_dev *hdev = vport->back; 2504 2505 hclge_update_link_status(hdev); 2506 2507 return hdev->hw.mac.link; 2508 } 2509 2510 static void hclge_service_timer(struct timer_list *t) 2511 { 2512 struct hclge_dev *hdev = from_timer(hdev, t, service_timer); 2513 2514 mod_timer(&hdev->service_timer, jiffies + HZ); 2515 hdev->hw_stats.stats_timer++; 2516 hclge_task_schedule(hdev); 2517 } 2518 2519 static void hclge_service_complete(struct hclge_dev *hdev) 2520 { 2521 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); 2522 2523 /* Flush memory before next watchdog */ 2524 smp_mb__before_atomic(); 2525 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); 2526 } 2527 2528 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) 2529 { 2530 u32 rst_src_reg; 2531 u32 cmdq_src_reg; 2532 2533 /* fetch the events from their corresponding regs */ 2534 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG); 2535 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); 2536 2537 /* Assumption: If by any chance reset and mailbox events are reported 2538 * together then we will only process reset event in this go and will 2539 * defer the processing of the mailbox events. Since, we would have not 2540 * cleared RX CMDQ event this time we would receive again another 2541 * interrupt from H/W just for the mailbox. 
2542 */ 2543 2544 /* check for vector0 reset event sources */ 2545 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { 2546 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); 2547 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 2548 return HCLGE_VECTOR0_EVENT_RST; 2549 } 2550 2551 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { 2552 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); 2553 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); 2554 return HCLGE_VECTOR0_EVENT_RST; 2555 } 2556 2557 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { 2558 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); 2559 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 2560 return HCLGE_VECTOR0_EVENT_RST; 2561 } 2562 2563 /* check for vector0 mailbox(=CMDQ RX) event source */ 2564 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 2565 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); 2566 *clearval = cmdq_src_reg; 2567 return HCLGE_VECTOR0_EVENT_MBX; 2568 } 2569 2570 return HCLGE_VECTOR0_EVENT_OTHER; 2571 } 2572 2573 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, 2574 u32 regclr) 2575 { 2576 switch (event_type) { 2577 case HCLGE_VECTOR0_EVENT_RST: 2578 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); 2579 break; 2580 case HCLGE_VECTOR0_EVENT_MBX: 2581 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); 2582 break; 2583 } 2584 } 2585 2586 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) 2587 { 2588 writel(enable ? 1 : 0, vector->addr); 2589 } 2590 2591 static irqreturn_t hclge_misc_irq_handle(int irq, void *data) 2592 { 2593 struct hclge_dev *hdev = data; 2594 u32 event_cause; 2595 u32 clearval; 2596 2597 hclge_enable_vector(&hdev->misc_vector, false); 2598 event_cause = hclge_check_event_cause(hdev, &clearval); 2599 2600 /* vector 0 interrupt is shared with reset and mailbox source events.*/ 2601 switch (event_cause) { 2602 case HCLGE_VECTOR0_EVENT_RST: 2603 hclge_reset_task_schedule(hdev); 2604 break; 2605 case HCLGE_VECTOR0_EVENT_MBX: 2606 /* If we are here then, 2607 * 1. Either we are not handling any mbx task and we are not 2608 * scheduled as well 2609 * OR 2610 * 2. We could be handling a mbx task but nothing more is 2611 * scheduled. 2612 * In both cases, we should schedule mbx task as there are more 2613 * mbx messages reported by this interrupt. 
2614 */ 2615 hclge_mbx_task_schedule(hdev); 2616 2617 default: 2618 dev_dbg(&hdev->pdev->dev, 2619 "received unknown or unhandled event of vector0\n"); 2620 break; 2621 } 2622 2623 /* we should clear the source of interrupt */ 2624 hclge_clear_event_cause(hdev, event_cause, clearval); 2625 hclge_enable_vector(&hdev->misc_vector, true); 2626 2627 return IRQ_HANDLED; 2628 } 2629 2630 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) 2631 { 2632 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; 2633 hdev->num_msi_left += 1; 2634 hdev->num_msi_used -= 1; 2635 } 2636 2637 static void hclge_get_misc_vector(struct hclge_dev *hdev) 2638 { 2639 struct hclge_misc_vector *vector = &hdev->misc_vector; 2640 2641 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); 2642 2643 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; 2644 hdev->vector_status[0] = 0; 2645 2646 hdev->num_msi_left -= 1; 2647 hdev->num_msi_used += 1; 2648 } 2649 2650 static int hclge_misc_irq_init(struct hclge_dev *hdev) 2651 { 2652 int ret; 2653 2654 hclge_get_misc_vector(hdev); 2655 2656 /* this would be explicitly freed in the end */ 2657 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, 2658 0, "hclge_misc", hdev); 2659 if (ret) { 2660 hclge_free_vector(hdev, 0); 2661 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", 2662 hdev->misc_vector.vector_irq); 2663 } 2664 2665 return ret; 2666 } 2667 2668 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) 2669 { 2670 free_irq(hdev->misc_vector.vector_irq, hdev); 2671 hclge_free_vector(hdev, 0); 2672 } 2673 2674 static int hclge_notify_client(struct hclge_dev *hdev, 2675 enum hnae3_reset_notify_type type) 2676 { 2677 struct hnae3_client *client = hdev->nic_client; 2678 u16 i; 2679 2680 if (!client->ops->reset_notify) 2681 return -EOPNOTSUPP; 2682 2683 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2684 struct hnae3_handle *handle = &hdev->vport[i].nic; 2685 int ret; 2686 2687 ret = client->ops->reset_notify(handle, type); 2688 if (ret) 2689 return ret; 2690 } 2691 2692 return 0; 2693 } 2694 2695 static int hclge_reset_wait(struct hclge_dev *hdev) 2696 { 2697 #define HCLGE_RESET_WATI_MS 100 2698 #define HCLGE_RESET_WAIT_CNT 5 2699 u32 val, reg, reg_bit; 2700 u32 cnt = 0; 2701 2702 switch (hdev->reset_type) { 2703 case HNAE3_GLOBAL_RESET: 2704 reg = HCLGE_GLOBAL_RESET_REG; 2705 reg_bit = HCLGE_GLOBAL_RESET_BIT; 2706 break; 2707 case HNAE3_CORE_RESET: 2708 reg = HCLGE_GLOBAL_RESET_REG; 2709 reg_bit = HCLGE_CORE_RESET_BIT; 2710 break; 2711 case HNAE3_FUNC_RESET: 2712 reg = HCLGE_FUN_RST_ING; 2713 reg_bit = HCLGE_FUN_RST_ING_B; 2714 break; 2715 default: 2716 dev_err(&hdev->pdev->dev, 2717 "Wait for unsupported reset type: %d\n", 2718 hdev->reset_type); 2719 return -EINVAL; 2720 } 2721 2722 val = hclge_read_dev(&hdev->hw, reg); 2723 while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { 2724 msleep(HCLGE_RESET_WATI_MS); 2725 val = hclge_read_dev(&hdev->hw, reg); 2726 cnt++; 2727 } 2728 2729 if (cnt >= HCLGE_RESET_WAIT_CNT) { 2730 dev_warn(&hdev->pdev->dev, 2731 "Wait for reset timeout: %d\n", hdev->reset_type); 2732 return -EBUSY; 2733 } 2734 2735 return 0; 2736 } 2737 2738 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) 2739 { 2740 struct hclge_desc desc; 2741 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; 2742 int ret; 2743 2744 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); 2745 hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0); 2746 
hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); 2747 req->fun_reset_vfid = func_id; 2748 2749 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2750 if (ret) 2751 dev_err(&hdev->pdev->dev, 2752 "send function reset cmd fail, status =%d\n", ret); 2753 2754 return ret; 2755 } 2756 2757 static void hclge_do_reset(struct hclge_dev *hdev) 2758 { 2759 struct pci_dev *pdev = hdev->pdev; 2760 u32 val; 2761 2762 switch (hdev->reset_type) { 2763 case HNAE3_GLOBAL_RESET: 2764 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 2765 hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); 2766 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 2767 dev_info(&pdev->dev, "Global Reset requested\n"); 2768 break; 2769 case HNAE3_CORE_RESET: 2770 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 2771 hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1); 2772 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 2773 dev_info(&pdev->dev, "Core Reset requested\n"); 2774 break; 2775 case HNAE3_FUNC_RESET: 2776 dev_info(&pdev->dev, "PF Reset requested\n"); 2777 hclge_func_reset_cmd(hdev, 0); 2778 /* schedule again to check later */ 2779 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); 2780 hclge_reset_task_schedule(hdev); 2781 break; 2782 default: 2783 dev_warn(&pdev->dev, 2784 "Unsupported reset type: %d\n", hdev->reset_type); 2785 break; 2786 } 2787 } 2788 2789 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, 2790 unsigned long *addr) 2791 { 2792 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 2793 2794 /* return the highest priority reset level amongst all */ 2795 if (test_bit(HNAE3_GLOBAL_RESET, addr)) 2796 rst_level = HNAE3_GLOBAL_RESET; 2797 else if (test_bit(HNAE3_CORE_RESET, addr)) 2798 rst_level = HNAE3_CORE_RESET; 2799 else if (test_bit(HNAE3_IMP_RESET, addr)) 2800 rst_level = HNAE3_IMP_RESET; 2801 else if (test_bit(HNAE3_FUNC_RESET, addr)) 2802 rst_level = HNAE3_FUNC_RESET; 2803 2804 /* now, clear all other resets */ 2805 clear_bit(HNAE3_GLOBAL_RESET, addr); 2806 clear_bit(HNAE3_CORE_RESET, addr); 2807 clear_bit(HNAE3_IMP_RESET, addr); 2808 clear_bit(HNAE3_FUNC_RESET, addr); 2809 2810 return rst_level; 2811 } 2812 2813 static void hclge_reset(struct hclge_dev *hdev) 2814 { 2815 /* perform reset of the stack & ae device for a client */ 2816 2817 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 2818 2819 if (!hclge_reset_wait(hdev)) { 2820 rtnl_lock(); 2821 hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); 2822 hclge_reset_ae_dev(hdev->ae_dev); 2823 hclge_notify_client(hdev, HNAE3_INIT_CLIENT); 2824 rtnl_unlock(); 2825 } else { 2826 /* schedule again to check pending resets later */ 2827 set_bit(hdev->reset_type, &hdev->reset_pending); 2828 hclge_reset_task_schedule(hdev); 2829 } 2830 2831 hclge_notify_client(hdev, HNAE3_UP_CLIENT); 2832 } 2833 2834 static void hclge_reset_event(struct hnae3_handle *handle) 2835 { 2836 struct hclge_vport *vport = hclge_get_vport(handle); 2837 struct hclge_dev *hdev = vport->back; 2838 2839 /* check if this is a new reset request and we are not here just because 2840 * last reset attempt did not succeed and watchdog hit us again. We will 2841 * know this if last reset request did not occur very recently (watchdog 2842 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) 2843 * In case of new request we reset the "reset level" to PF reset. 
2844 */ 2845 if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) 2846 handle->reset_level = HNAE3_FUNC_RESET; 2847 2848 dev_info(&hdev->pdev->dev, "received reset event , reset type is %d", 2849 handle->reset_level); 2850 2851 /* request reset & schedule reset task */ 2852 set_bit(handle->reset_level, &hdev->reset_request); 2853 hclge_reset_task_schedule(hdev); 2854 2855 if (handle->reset_level < HNAE3_GLOBAL_RESET) 2856 handle->reset_level++; 2857 2858 handle->last_reset_time = jiffies; 2859 } 2860 2861 static void hclge_reset_subtask(struct hclge_dev *hdev) 2862 { 2863 /* check if there is any ongoing reset in the hardware. This status can 2864 * be checked from reset_pending. If there is then, we need to wait for 2865 * hardware to complete reset. 2866 * a. If we are able to figure out in reasonable time that hardware 2867 * has fully resetted then, we can proceed with driver, client 2868 * reset. 2869 * b. else, we can come back later to check this status so re-sched 2870 * now. 2871 */ 2872 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); 2873 if (hdev->reset_type != HNAE3_NONE_RESET) 2874 hclge_reset(hdev); 2875 2876 /* check if we got any *new* reset requests to be honored */ 2877 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request); 2878 if (hdev->reset_type != HNAE3_NONE_RESET) 2879 hclge_do_reset(hdev); 2880 2881 hdev->reset_type = HNAE3_NONE_RESET; 2882 } 2883 2884 static void hclge_reset_service_task(struct work_struct *work) 2885 { 2886 struct hclge_dev *hdev = 2887 container_of(work, struct hclge_dev, rst_service_task); 2888 2889 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 2890 return; 2891 2892 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 2893 2894 hclge_reset_subtask(hdev); 2895 2896 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 2897 } 2898 2899 static void hclge_mailbox_service_task(struct work_struct *work) 2900 { 2901 struct hclge_dev *hdev = 2902 container_of(work, struct hclge_dev, mbx_service_task); 2903 2904 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) 2905 return; 2906 2907 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 2908 2909 hclge_mbx_handler(hdev); 2910 2911 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 2912 } 2913 2914 static void hclge_service_task(struct work_struct *work) 2915 { 2916 struct hclge_dev *hdev = 2917 container_of(work, struct hclge_dev, service_task); 2918 2919 /* The total rx/tx packets statstics are wanted to be updated 2920 * per second. Both hclge_update_stats_for_all() and 2921 * hclge_mac_get_traffic_stats() can do it. 
2922 */ 2923 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { 2924 hclge_update_stats_for_all(hdev); 2925 hdev->hw_stats.stats_timer = 0; 2926 } else { 2927 hclge_mac_get_traffic_stats(hdev); 2928 } 2929 2930 hclge_update_speed_duplex(hdev); 2931 hclge_update_link_status(hdev); 2932 hclge_update_led_status(hdev); 2933 hclge_service_complete(hdev); 2934 } 2935 2936 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) 2937 { 2938 /* VF handle has no client */ 2939 if (!handle->client) 2940 return container_of(handle, struct hclge_vport, nic); 2941 else if (handle->client->type == HNAE3_CLIENT_ROCE) 2942 return container_of(handle, struct hclge_vport, roce); 2943 else 2944 return container_of(handle, struct hclge_vport, nic); 2945 } 2946 2947 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, 2948 struct hnae3_vector_info *vector_info) 2949 { 2950 struct hclge_vport *vport = hclge_get_vport(handle); 2951 struct hnae3_vector_info *vector = vector_info; 2952 struct hclge_dev *hdev = vport->back; 2953 int alloc = 0; 2954 int i, j; 2955 2956 vector_num = min(hdev->num_msi_left, vector_num); 2957 2958 for (j = 0; j < vector_num; j++) { 2959 for (i = 1; i < hdev->num_msi; i++) { 2960 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { 2961 vector->vector = pci_irq_vector(hdev->pdev, i); 2962 vector->io_addr = hdev->hw.io_base + 2963 HCLGE_VECTOR_REG_BASE + 2964 (i - 1) * HCLGE_VECTOR_REG_OFFSET + 2965 vport->vport_id * 2966 HCLGE_VECTOR_VF_OFFSET; 2967 hdev->vector_status[i] = vport->vport_id; 2968 hdev->vector_irq[i] = vector->vector; 2969 2970 vector++; 2971 alloc++; 2972 2973 break; 2974 } 2975 } 2976 } 2977 hdev->num_msi_left -= alloc; 2978 hdev->num_msi_used += alloc; 2979 2980 return alloc; 2981 } 2982 2983 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) 2984 { 2985 int i; 2986 2987 for (i = 0; i < hdev->num_msi; i++) 2988 if (vector == hdev->vector_irq[i]) 2989 return i; 2990 2991 return -EINVAL; 2992 } 2993 2994 static int hclge_put_vector(struct hnae3_handle *handle, int vector) 2995 { 2996 struct hclge_vport *vport = hclge_get_vport(handle); 2997 struct hclge_dev *hdev = vport->back; 2998 int vector_id; 2999 3000 vector_id = hclge_get_vector_index(hdev, vector); 3001 if (vector_id < 0) { 3002 dev_err(&hdev->pdev->dev, 3003 "Get vector index fail. 
vector_id =%d\n", vector_id); 3004 return vector_id; 3005 } 3006 3007 hclge_free_vector(hdev, vector_id); 3008 3009 return 0; 3010 } 3011 3012 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) 3013 { 3014 return HCLGE_RSS_KEY_SIZE; 3015 } 3016 3017 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) 3018 { 3019 return HCLGE_RSS_IND_TBL_SIZE; 3020 } 3021 3022 static int hclge_set_rss_algo_key(struct hclge_dev *hdev, 3023 const u8 hfunc, const u8 *key) 3024 { 3025 struct hclge_rss_config_cmd *req; 3026 struct hclge_desc desc; 3027 int key_offset; 3028 int key_size; 3029 int ret; 3030 3031 req = (struct hclge_rss_config_cmd *)desc.data; 3032 3033 for (key_offset = 0; key_offset < 3; key_offset++) { 3034 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, 3035 false); 3036 3037 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); 3038 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); 3039 3040 if (key_offset == 2) 3041 key_size = 3042 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; 3043 else 3044 key_size = HCLGE_RSS_HASH_KEY_NUM; 3045 3046 memcpy(req->hash_key, 3047 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); 3048 3049 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3050 if (ret) { 3051 dev_err(&hdev->pdev->dev, 3052 "Configure RSS config fail, status = %d\n", 3053 ret); 3054 return ret; 3055 } 3056 } 3057 return 0; 3058 } 3059 3060 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) 3061 { 3062 struct hclge_rss_indirection_table_cmd *req; 3063 struct hclge_desc desc; 3064 int i, j; 3065 int ret; 3066 3067 req = (struct hclge_rss_indirection_table_cmd *)desc.data; 3068 3069 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { 3070 hclge_cmd_setup_basic_desc 3071 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); 3072 3073 req->start_table_index = 3074 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); 3075 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); 3076 3077 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) 3078 req->rss_result[j] = 3079 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; 3080 3081 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3082 if (ret) { 3083 dev_err(&hdev->pdev->dev, 3084 "Configure rss indir table fail,status = %d\n", 3085 ret); 3086 return ret; 3087 } 3088 } 3089 return 0; 3090 } 3091 3092 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, 3093 u16 *tc_size, u16 *tc_offset) 3094 { 3095 struct hclge_rss_tc_mode_cmd *req; 3096 struct hclge_desc desc; 3097 int ret; 3098 int i; 3099 3100 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); 3101 req = (struct hclge_rss_tc_mode_cmd *)desc.data; 3102 3103 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 3104 u16 mode = 0; 3105 3106 hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); 3107 hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M, 3108 HCLGE_RSS_TC_SIZE_S, tc_size[i]); 3109 hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M, 3110 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); 3111 3112 req->rss_tc_mode[i] = cpu_to_le16(mode); 3113 } 3114 3115 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3116 if (ret) { 3117 dev_err(&hdev->pdev->dev, 3118 "Configure rss tc mode fail, status = %d\n", ret); 3119 return ret; 3120 } 3121 3122 return 0; 3123 } 3124 3125 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) 3126 { 3127 struct hclge_rss_input_tuple_cmd *req; 3128 struct hclge_desc desc; 3129 int ret; 3130 3131 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 3132 3133 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 3134 3135 /* 
Get the tuple cfg from pf */ 3136 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; 3137 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; 3138 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; 3139 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; 3140 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; 3141 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; 3142 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; 3143 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; 3144 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3145 if (ret) { 3146 dev_err(&hdev->pdev->dev, 3147 "Configure rss input fail, status = %d\n", ret); 3148 return ret; 3149 } 3150 3151 return 0; 3152 } 3153 3154 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, 3155 u8 *key, u8 *hfunc) 3156 { 3157 struct hclge_vport *vport = hclge_get_vport(handle); 3158 int i; 3159 3160 /* Get hash algorithm */ 3161 if (hfunc) 3162 *hfunc = vport->rss_algo; 3163 3164 /* Get the RSS Key required by the user */ 3165 if (key) 3166 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); 3167 3168 /* Get indirect table */ 3169 if (indir) 3170 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3171 indir[i] = vport->rss_indirection_tbl[i]; 3172 3173 return 0; 3174 } 3175 3176 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, 3177 const u8 *key, const u8 hfunc) 3178 { 3179 struct hclge_vport *vport = hclge_get_vport(handle); 3180 struct hclge_dev *hdev = vport->back; 3181 u8 hash_algo; 3182 int ret, i; 3183 3184 /* Set the RSS Hash Key if specififed by the user */ 3185 if (key) { 3186 3187 if (hfunc == ETH_RSS_HASH_TOP || 3188 hfunc == ETH_RSS_HASH_NO_CHANGE) 3189 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 3190 else 3191 return -EINVAL; 3192 ret = hclge_set_rss_algo_key(hdev, hash_algo, key); 3193 if (ret) 3194 return ret; 3195 3196 /* Update the shadow RSS key with user specified qids */ 3197 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); 3198 vport->rss_algo = hash_algo; 3199 } 3200 3201 /* Update the shadow RSS table with user specified qids */ 3202 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3203 vport->rss_indirection_tbl[i] = indir[i]; 3204 3205 /* Update the hardware */ 3206 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); 3207 } 3208 3209 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 3210 { 3211 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? 
HCLGE_S_PORT_BIT : 0; 3212 3213 if (nfc->data & RXH_L4_B_2_3) 3214 hash_sets |= HCLGE_D_PORT_BIT; 3215 else 3216 hash_sets &= ~HCLGE_D_PORT_BIT; 3217 3218 if (nfc->data & RXH_IP_SRC) 3219 hash_sets |= HCLGE_S_IP_BIT; 3220 else 3221 hash_sets &= ~HCLGE_S_IP_BIT; 3222 3223 if (nfc->data & RXH_IP_DST) 3224 hash_sets |= HCLGE_D_IP_BIT; 3225 else 3226 hash_sets &= ~HCLGE_D_IP_BIT; 3227 3228 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 3229 hash_sets |= HCLGE_V_TAG_BIT; 3230 3231 return hash_sets; 3232 } 3233 3234 static int hclge_set_rss_tuple(struct hnae3_handle *handle, 3235 struct ethtool_rxnfc *nfc) 3236 { 3237 struct hclge_vport *vport = hclge_get_vport(handle); 3238 struct hclge_dev *hdev = vport->back; 3239 struct hclge_rss_input_tuple_cmd *req; 3240 struct hclge_desc desc; 3241 u8 tuple_sets; 3242 int ret; 3243 3244 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | 3245 RXH_L4_B_0_1 | RXH_L4_B_2_3)) 3246 return -EINVAL; 3247 3248 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 3249 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 3250 3251 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; 3252 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; 3253 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; 3254 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; 3255 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; 3256 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; 3257 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; 3258 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; 3259 3260 tuple_sets = hclge_get_rss_hash_bits(nfc); 3261 switch (nfc->flow_type) { 3262 case TCP_V4_FLOW: 3263 req->ipv4_tcp_en = tuple_sets; 3264 break; 3265 case TCP_V6_FLOW: 3266 req->ipv6_tcp_en = tuple_sets; 3267 break; 3268 case UDP_V4_FLOW: 3269 req->ipv4_udp_en = tuple_sets; 3270 break; 3271 case UDP_V6_FLOW: 3272 req->ipv6_udp_en = tuple_sets; 3273 break; 3274 case SCTP_V4_FLOW: 3275 req->ipv4_sctp_en = tuple_sets; 3276 break; 3277 case SCTP_V6_FLOW: 3278 if ((nfc->data & RXH_L4_B_0_1) || 3279 (nfc->data & RXH_L4_B_2_3)) 3280 return -EINVAL; 3281 3282 req->ipv6_sctp_en = tuple_sets; 3283 break; 3284 case IPV4_FLOW: 3285 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3286 break; 3287 case IPV6_FLOW: 3288 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3289 break; 3290 default: 3291 return -EINVAL; 3292 } 3293 3294 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3295 if (ret) { 3296 dev_err(&hdev->pdev->dev, 3297 "Set rss tuple fail, status = %d\n", ret); 3298 return ret; 3299 } 3300 3301 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 3302 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 3303 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 3304 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 3305 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 3306 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 3307 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 3308 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 3309 return 0; 3310 } 3311 3312 static int hclge_get_rss_tuple(struct hnae3_handle *handle, 3313 struct ethtool_rxnfc *nfc) 3314 { 3315 struct hclge_vport *vport = hclge_get_vport(handle); 3316 u8 tuple_sets; 3317 3318 nfc->data = 0; 3319 3320 switch (nfc->flow_type) { 3321 case TCP_V4_FLOW: 3322 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en; 3323 break; 3324 case UDP_V4_FLOW: 3325 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; 3326 break; 3327 
case TCP_V6_FLOW: 3328 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; 3329 break; 3330 case UDP_V6_FLOW: 3331 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; 3332 break; 3333 case SCTP_V4_FLOW: 3334 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; 3335 break; 3336 case SCTP_V6_FLOW: 3337 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; 3338 break; 3339 case IPV4_FLOW: 3340 case IPV6_FLOW: 3341 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; 3342 break; 3343 default: 3344 return -EINVAL; 3345 } 3346 3347 if (!tuple_sets) 3348 return 0; 3349 3350 if (tuple_sets & HCLGE_D_PORT_BIT) 3351 nfc->data |= RXH_L4_B_2_3; 3352 if (tuple_sets & HCLGE_S_PORT_BIT) 3353 nfc->data |= RXH_L4_B_0_1; 3354 if (tuple_sets & HCLGE_D_IP_BIT) 3355 nfc->data |= RXH_IP_DST; 3356 if (tuple_sets & HCLGE_S_IP_BIT) 3357 nfc->data |= RXH_IP_SRC; 3358 3359 return 0; 3360 } 3361 3362 static int hclge_get_tc_size(struct hnae3_handle *handle) 3363 { 3364 struct hclge_vport *vport = hclge_get_vport(handle); 3365 struct hclge_dev *hdev = vport->back; 3366 3367 return hdev->rss_size_max; 3368 } 3369 3370 int hclge_rss_init_hw(struct hclge_dev *hdev) 3371 { 3372 struct hclge_vport *vport = hdev->vport; 3373 u8 *rss_indir = vport[0].rss_indirection_tbl; 3374 u16 rss_size = vport[0].alloc_rss_size; 3375 u8 *key = vport[0].rss_hash_key; 3376 u8 hfunc = vport[0].rss_algo; 3377 u16 tc_offset[HCLGE_MAX_TC_NUM]; 3378 u16 tc_valid[HCLGE_MAX_TC_NUM]; 3379 u16 tc_size[HCLGE_MAX_TC_NUM]; 3380 u16 roundup_size; 3381 int i, ret; 3382 3383 ret = hclge_set_rss_indir_table(hdev, rss_indir); 3384 if (ret) 3385 return ret; 3386 3387 ret = hclge_set_rss_algo_key(hdev, hfunc, key); 3388 if (ret) 3389 return ret; 3390 3391 ret = hclge_set_rss_input_tuple(hdev); 3392 if (ret) 3393 return ret; 3394 3395 /* Each TC have the same queue size, and tc_size set to hardware is 3396 * the log2 of roundup power of two of rss_size, the acutal queue 3397 * size is limited by indirection table. 
3398 */ 3399 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { 3400 dev_err(&hdev->pdev->dev, 3401 "Configure rss tc size failed, invalid TC_SIZE = %d\n", 3402 rss_size); 3403 return -EINVAL; 3404 } 3405 3406 roundup_size = roundup_pow_of_two(rss_size); 3407 roundup_size = ilog2(roundup_size); 3408 3409 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 3410 tc_valid[i] = 0; 3411 3412 if (!(hdev->hw_tc_map & BIT(i))) 3413 continue; 3414 3415 tc_valid[i] = 1; 3416 tc_size[i] = roundup_size; 3417 tc_offset[i] = rss_size * i; 3418 } 3419 3420 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 3421 } 3422 3423 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) 3424 { 3425 struct hclge_vport *vport = hdev->vport; 3426 int i, j; 3427 3428 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { 3429 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3430 vport[j].rss_indirection_tbl[i] = 3431 i % vport[j].alloc_rss_size; 3432 } 3433 } 3434 3435 static void hclge_rss_init_cfg(struct hclge_dev *hdev) 3436 { 3437 struct hclge_vport *vport = hdev->vport; 3438 int i; 3439 3440 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 3441 vport[i].rss_tuple_sets.ipv4_tcp_en = 3442 HCLGE_RSS_INPUT_TUPLE_OTHER; 3443 vport[i].rss_tuple_sets.ipv4_udp_en = 3444 HCLGE_RSS_INPUT_TUPLE_OTHER; 3445 vport[i].rss_tuple_sets.ipv4_sctp_en = 3446 HCLGE_RSS_INPUT_TUPLE_SCTP; 3447 vport[i].rss_tuple_sets.ipv4_fragment_en = 3448 HCLGE_RSS_INPUT_TUPLE_OTHER; 3449 vport[i].rss_tuple_sets.ipv6_tcp_en = 3450 HCLGE_RSS_INPUT_TUPLE_OTHER; 3451 vport[i].rss_tuple_sets.ipv6_udp_en = 3452 HCLGE_RSS_INPUT_TUPLE_OTHER; 3453 vport[i].rss_tuple_sets.ipv6_sctp_en = 3454 HCLGE_RSS_INPUT_TUPLE_SCTP; 3455 vport[i].rss_tuple_sets.ipv6_fragment_en = 3456 HCLGE_RSS_INPUT_TUPLE_OTHER; 3457 3458 vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 3459 3460 netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE); 3461 } 3462 3463 hclge_rss_indir_init_cfg(hdev); 3464 } 3465 3466 int hclge_bind_ring_with_vector(struct hclge_vport *vport, 3467 int vector_id, bool en, 3468 struct hnae3_ring_chain_node *ring_chain) 3469 { 3470 struct hclge_dev *hdev = vport->back; 3471 struct hnae3_ring_chain_node *node; 3472 struct hclge_desc desc; 3473 struct hclge_ctrl_vector_chain_cmd *req 3474 = (struct hclge_ctrl_vector_chain_cmd *)desc.data; 3475 enum hclge_cmd_status status; 3476 enum hclge_opcode_type op; 3477 u16 tqp_type_and_id; 3478 int i; 3479 3480 op = en ? 
HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; 3481 hclge_cmd_setup_basic_desc(&desc, op, false); 3482 req->int_vector_id = vector_id; 3483 3484 i = 0; 3485 for (node = ring_chain; node; node = node->next) { 3486 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); 3487 hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, 3488 HCLGE_INT_TYPE_S, 3489 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); 3490 hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, 3491 HCLGE_TQP_ID_S, node->tqp_index); 3492 hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, 3493 HCLGE_INT_GL_IDX_S, 3494 hnae_get_field(node->int_gl_idx, 3495 HNAE3_RING_GL_IDX_M, 3496 HNAE3_RING_GL_IDX_S)); 3497 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); 3498 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { 3499 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; 3500 req->vfid = vport->vport_id; 3501 3502 status = hclge_cmd_send(&hdev->hw, &desc, 1); 3503 if (status) { 3504 dev_err(&hdev->pdev->dev, 3505 "Map TQP fail, status is %d.\n", 3506 status); 3507 return -EIO; 3508 } 3509 i = 0; 3510 3511 hclge_cmd_setup_basic_desc(&desc, 3512 op, 3513 false); 3514 req->int_vector_id = vector_id; 3515 } 3516 } 3517 3518 if (i > 0) { 3519 req->int_cause_num = i; 3520 req->vfid = vport->vport_id; 3521 status = hclge_cmd_send(&hdev->hw, &desc, 1); 3522 if (status) { 3523 dev_err(&hdev->pdev->dev, 3524 "Map TQP fail, status is %d.\n", status); 3525 return -EIO; 3526 } 3527 } 3528 3529 return 0; 3530 } 3531 3532 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, 3533 int vector, 3534 struct hnae3_ring_chain_node *ring_chain) 3535 { 3536 struct hclge_vport *vport = hclge_get_vport(handle); 3537 struct hclge_dev *hdev = vport->back; 3538 int vector_id; 3539 3540 vector_id = hclge_get_vector_index(hdev, vector); 3541 if (vector_id < 0) { 3542 dev_err(&hdev->pdev->dev, 3543 "Get vector index fail. vector_id =%d\n", vector_id); 3544 return vector_id; 3545 } 3546 3547 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); 3548 } 3549 3550 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, 3551 int vector, 3552 struct hnae3_ring_chain_node *ring_chain) 3553 { 3554 struct hclge_vport *vport = hclge_get_vport(handle); 3555 struct hclge_dev *hdev = vport->back; 3556 int vector_id, ret; 3557 3558 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 3559 return 0; 3560 3561 vector_id = hclge_get_vector_index(hdev, vector); 3562 if (vector_id < 0) { 3563 dev_err(&handle->pdev->dev, 3564 "Get vector index fail. ret =%d\n", vector_id); 3565 return vector_id; 3566 } 3567 3568 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); 3569 if (ret) 3570 dev_err(&handle->pdev->dev, 3571 "Unmap ring from vector fail. 
vectorid=%d, ret =%d\n", 3572 vector_id, 3573 ret); 3574 3575 return ret; 3576 } 3577 3578 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, 3579 struct hclge_promisc_param *param) 3580 { 3581 struct hclge_promisc_cfg_cmd *req; 3582 struct hclge_desc desc; 3583 int ret; 3584 3585 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); 3586 3587 req = (struct hclge_promisc_cfg_cmd *)desc.data; 3588 req->vf_id = param->vf_id; 3589 req->flag = (param->enable << HCLGE_PROMISC_EN_B); 3590 3591 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3592 if (ret) { 3593 dev_err(&hdev->pdev->dev, 3594 "Set promisc mode fail, status is %d.\n", ret); 3595 return ret; 3596 } 3597 return 0; 3598 } 3599 3600 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, 3601 bool en_mc, bool en_bc, int vport_id) 3602 { 3603 if (!param) 3604 return; 3605 3606 memset(param, 0, sizeof(struct hclge_promisc_param)); 3607 if (en_uc) 3608 param->enable = HCLGE_PROMISC_EN_UC; 3609 if (en_mc) 3610 param->enable |= HCLGE_PROMISC_EN_MC; 3611 if (en_bc) 3612 param->enable |= HCLGE_PROMISC_EN_BC; 3613 param->vf_id = vport_id; 3614 } 3615 3616 static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en) 3617 { 3618 struct hclge_vport *vport = hclge_get_vport(handle); 3619 struct hclge_dev *hdev = vport->back; 3620 struct hclge_promisc_param param; 3621 3622 hclge_promisc_param_init(¶m, en, en, true, vport->vport_id); 3623 hclge_cmd_set_promisc_mode(hdev, ¶m); 3624 } 3625 3626 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) 3627 { 3628 struct hclge_desc desc; 3629 struct hclge_config_mac_mode_cmd *req = 3630 (struct hclge_config_mac_mode_cmd *)desc.data; 3631 u32 loop_en = 0; 3632 int ret; 3633 3634 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); 3635 hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); 3636 hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); 3637 hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); 3638 hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); 3639 hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); 3640 hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); 3641 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); 3642 hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); 3643 hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); 3644 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); 3645 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); 3646 hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); 3647 hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); 3648 hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); 3649 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 3650 3651 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3652 if (ret) 3653 dev_err(&hdev->pdev->dev, 3654 "mac enable fail, ret =%d.\n", ret); 3655 } 3656 3657 static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) 3658 { 3659 struct hclge_config_mac_mode_cmd *req; 3660 struct hclge_desc desc; 3661 u32 loop_en; 3662 int ret; 3663 3664 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; 3665 /* 1 Read out the MAC mode config at first */ 3666 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 3667 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3668 if (ret) { 3669 dev_err(&hdev->pdev->dev, 3670 "mac loopback get fail, ret =%d.\n", ret); 3671 return ret; 3672 } 3673 3674 /* 2 Then setup the loopback flag */ 3675 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); 3676 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 
1 : 0); 3677 3678 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 3679 3680 /* 3 Config mac work mode with loopback flag 3681 * and its original configure parameters 3682 */ 3683 hclge_cmd_reuse_desc(&desc, false); 3684 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3685 if (ret) 3686 dev_err(&hdev->pdev->dev, 3687 "mac loopback set fail, ret =%d.\n", ret); 3688 return ret; 3689 } 3690 3691 static int hclge_set_loopback(struct hnae3_handle *handle, 3692 enum hnae3_loop loop_mode, bool en) 3693 { 3694 struct hclge_vport *vport = hclge_get_vport(handle); 3695 struct hclge_dev *hdev = vport->back; 3696 int ret; 3697 3698 switch (loop_mode) { 3699 case HNAE3_MAC_INTER_LOOP_MAC: 3700 ret = hclge_set_mac_loopback(hdev, en); 3701 break; 3702 default: 3703 ret = -ENOTSUPP; 3704 dev_err(&hdev->pdev->dev, 3705 "loop_mode %d is not supported\n", loop_mode); 3706 break; 3707 } 3708 3709 return ret; 3710 } 3711 3712 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, 3713 int stream_id, bool enable) 3714 { 3715 struct hclge_desc desc; 3716 struct hclge_cfg_com_tqp_queue_cmd *req = 3717 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; 3718 int ret; 3719 3720 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); 3721 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); 3722 req->stream_id = cpu_to_le16(stream_id); 3723 req->enable |= enable << HCLGE_TQP_ENABLE_B; 3724 3725 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3726 if (ret) 3727 dev_err(&hdev->pdev->dev, 3728 "Tqp enable fail, status =%d.\n", ret); 3729 return ret; 3730 } 3731 3732 static void hclge_reset_tqp_stats(struct hnae3_handle *handle) 3733 { 3734 struct hclge_vport *vport = hclge_get_vport(handle); 3735 struct hnae3_queue *queue; 3736 struct hclge_tqp *tqp; 3737 int i; 3738 3739 for (i = 0; i < vport->alloc_tqps; i++) { 3740 queue = handle->kinfo.tqp[i]; 3741 tqp = container_of(queue, struct hclge_tqp, q); 3742 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 3743 } 3744 } 3745 3746 static int hclge_ae_start(struct hnae3_handle *handle) 3747 { 3748 struct hclge_vport *vport = hclge_get_vport(handle); 3749 struct hclge_dev *hdev = vport->back; 3750 int i, ret; 3751 3752 for (i = 0; i < vport->alloc_tqps; i++) 3753 hclge_tqp_enable(hdev, i, 0, true); 3754 3755 /* mac enable */ 3756 hclge_cfg_mac_mode(hdev, true); 3757 clear_bit(HCLGE_STATE_DOWN, &hdev->state); 3758 mod_timer(&hdev->service_timer, jiffies + HZ); 3759 hdev->hw.mac.link = 0; 3760 3761 /* reset tqp stats */ 3762 hclge_reset_tqp_stats(handle); 3763 3764 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 3765 return 0; 3766 3767 ret = hclge_mac_start_phy(hdev); 3768 if (ret) 3769 return ret; 3770 3771 return 0; 3772 } 3773 3774 static void hclge_ae_stop(struct hnae3_handle *handle) 3775 { 3776 struct hclge_vport *vport = hclge_get_vport(handle); 3777 struct hclge_dev *hdev = vport->back; 3778 int i; 3779 3780 del_timer_sync(&hdev->service_timer); 3781 cancel_work_sync(&hdev->service_task); 3782 3783 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 3784 return; 3785 3786 for (i = 0; i < vport->alloc_tqps; i++) 3787 hclge_tqp_enable(hdev, i, 0, false); 3788 3789 /* Mac disable */ 3790 hclge_cfg_mac_mode(hdev, false); 3791 3792 hclge_mac_stop_phy(hdev); 3793 3794 /* reset tqp stats */ 3795 hclge_reset_tqp_stats(handle); 3796 del_timer_sync(&hdev->service_timer); 3797 cancel_work_sync(&hdev->service_task); 3798 hclge_update_link_status(hdev); 3799 } 3800 3801 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, 3802 u16 cmdq_resp, 
u8 resp_code, 3803 enum hclge_mac_vlan_tbl_opcode op) 3804 { 3805 struct hclge_dev *hdev = vport->back; 3806 int return_status = -EIO; 3807 3808 if (cmdq_resp) { 3809 dev_err(&hdev->pdev->dev, 3810 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", 3811 cmdq_resp); 3812 return -EIO; 3813 } 3814 3815 if (op == HCLGE_MAC_VLAN_ADD) { 3816 if ((!resp_code) || (resp_code == 1)) { 3817 return_status = 0; 3818 } else if (resp_code == 2) { 3819 return_status = -ENOSPC; 3820 dev_err(&hdev->pdev->dev, 3821 "add mac addr failed for uc_overflow.\n"); 3822 } else if (resp_code == 3) { 3823 return_status = -ENOSPC; 3824 dev_err(&hdev->pdev->dev, 3825 "add mac addr failed for mc_overflow.\n"); 3826 } else { 3827 dev_err(&hdev->pdev->dev, 3828 "add mac addr failed for undefined, code=%d.\n", 3829 resp_code); 3830 } 3831 } else if (op == HCLGE_MAC_VLAN_REMOVE) { 3832 if (!resp_code) { 3833 return_status = 0; 3834 } else if (resp_code == 1) { 3835 return_status = -ENOENT; 3836 dev_dbg(&hdev->pdev->dev, 3837 "remove mac addr failed for miss.\n"); 3838 } else { 3839 dev_err(&hdev->pdev->dev, 3840 "remove mac addr failed for undefined, code=%d.\n", 3841 resp_code); 3842 } 3843 } else if (op == HCLGE_MAC_VLAN_LKUP) { 3844 if (!resp_code) { 3845 return_status = 0; 3846 } else if (resp_code == 1) { 3847 return_status = -ENOENT; 3848 dev_dbg(&hdev->pdev->dev, 3849 "lookup mac addr failed for miss.\n"); 3850 } else { 3851 dev_err(&hdev->pdev->dev, 3852 "lookup mac addr failed for undefined, code=%d.\n", 3853 resp_code); 3854 } 3855 } else { 3856 return_status = -EINVAL; 3857 dev_err(&hdev->pdev->dev, 3858 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", 3859 op); 3860 } 3861 3862 return return_status; 3863 } 3864 3865 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) 3866 { 3867 int word_num; 3868 int bit_num; 3869 3870 if (vfid > 255 || vfid < 0) 3871 return -EIO; 3872 3873 if (vfid >= 0 && vfid <= 191) { 3874 word_num = vfid / 32; 3875 bit_num = vfid % 32; 3876 if (clr) 3877 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 3878 else 3879 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); 3880 } else { 3881 word_num = (vfid - 192) / 32; 3882 bit_num = vfid % 32; 3883 if (clr) 3884 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 3885 else 3886 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); 3887 } 3888 3889 return 0; 3890 } 3891 3892 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) 3893 { 3894 #define HCLGE_DESC_NUMBER 3 3895 #define HCLGE_FUNC_NUMBER_PER_DESC 6 3896 int i, j; 3897 3898 for (i = 0; i < HCLGE_DESC_NUMBER; i++) 3899 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) 3900 if (desc[i].data[j]) 3901 return false; 3902 3903 return true; 3904 } 3905 3906 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, 3907 const u8 *addr) 3908 { 3909 const unsigned char *mac_addr = addr; 3910 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | 3911 (mac_addr[0]) | (mac_addr[1] << 8); 3912 u32 low_val = mac_addr[4] | (mac_addr[5] << 8); 3913 3914 new_req->mac_addr_hi32 = cpu_to_le32(high_val); 3915 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); 3916 } 3917 3918 static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, 3919 const u8 *addr) 3920 { 3921 u16 high_val = addr[1] | (addr[0] << 8); 3922 struct hclge_dev *hdev = vport->back; 3923 u32 rsh = 4 - hdev->mta_mac_sel_type; 3924 u16 ret_val = (high_val >> rsh) & 0xfff; 3925 3926 return ret_val; 3927 } 3928 3929 static int 
hclge_set_mta_filter_mode(struct hclge_dev *hdev, 3930 enum hclge_mta_dmac_sel_type mta_mac_sel, 3931 bool enable) 3932 { 3933 struct hclge_mta_filter_mode_cmd *req; 3934 struct hclge_desc desc; 3935 int ret; 3936 3937 req = (struct hclge_mta_filter_mode_cmd *)desc.data; 3938 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); 3939 3940 hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, 3941 enable); 3942 hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, 3943 HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); 3944 3945 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3946 if (ret) { 3947 dev_err(&hdev->pdev->dev, 3948 "Config mat filter mode failed for cmd_send, ret =%d.\n", 3949 ret); 3950 return ret; 3951 } 3952 3953 return 0; 3954 } 3955 3956 int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, 3957 u8 func_id, 3958 bool enable) 3959 { 3960 struct hclge_cfg_func_mta_filter_cmd *req; 3961 struct hclge_desc desc; 3962 int ret; 3963 3964 req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; 3965 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); 3966 3967 hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, 3968 enable); 3969 req->function_id = func_id; 3970 3971 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3972 if (ret) { 3973 dev_err(&hdev->pdev->dev, 3974 "Config func_id enable failed for cmd_send, ret =%d.\n", 3975 ret); 3976 return ret; 3977 } 3978 3979 return 0; 3980 } 3981 3982 static int hclge_set_mta_table_item(struct hclge_vport *vport, 3983 u16 idx, 3984 bool enable) 3985 { 3986 struct hclge_dev *hdev = vport->back; 3987 struct hclge_cfg_func_mta_item_cmd *req; 3988 struct hclge_desc desc; 3989 u16 item_idx = 0; 3990 int ret; 3991 3992 req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; 3993 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); 3994 hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); 3995 3996 hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, 3997 HCLGE_CFG_MTA_ITEM_IDX_S, idx); 3998 req->item_idx = cpu_to_le16(item_idx); 3999 4000 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4001 if (ret) { 4002 dev_err(&hdev->pdev->dev, 4003 "Config mta table item failed for cmd_send, ret =%d.\n", 4004 ret); 4005 return ret; 4006 } 4007 4008 return 0; 4009 } 4010 4011 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, 4012 struct hclge_mac_vlan_tbl_entry_cmd *req) 4013 { 4014 struct hclge_dev *hdev = vport->back; 4015 struct hclge_desc desc; 4016 u8 resp_code; 4017 u16 retval; 4018 int ret; 4019 4020 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); 4021 4022 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4023 4024 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4025 if (ret) { 4026 dev_err(&hdev->pdev->dev, 4027 "del mac addr failed for cmd_send, ret =%d.\n", 4028 ret); 4029 return ret; 4030 } 4031 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 4032 retval = le16_to_cpu(desc.retval); 4033 4034 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 4035 HCLGE_MAC_VLAN_REMOVE); 4036 } 4037 4038 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, 4039 struct hclge_mac_vlan_tbl_entry_cmd *req, 4040 struct hclge_desc *desc, 4041 bool is_mc) 4042 { 4043 struct hclge_dev *hdev = vport->back; 4044 u8 resp_code; 4045 u16 retval; 4046 int ret; 4047 4048 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); 4049 if (is_mc) { 4050 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4051 memcpy(desc[0].data, 4052 req, 4053 sizeof(struct 
hclge_mac_vlan_tbl_entry_cmd)); 4054 hclge_cmd_setup_basic_desc(&desc[1], 4055 HCLGE_OPC_MAC_VLAN_ADD, 4056 true); 4057 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4058 hclge_cmd_setup_basic_desc(&desc[2], 4059 HCLGE_OPC_MAC_VLAN_ADD, 4060 true); 4061 ret = hclge_cmd_send(&hdev->hw, desc, 3); 4062 } else { 4063 memcpy(desc[0].data, 4064 req, 4065 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4066 ret = hclge_cmd_send(&hdev->hw, desc, 1); 4067 } 4068 if (ret) { 4069 dev_err(&hdev->pdev->dev, 4070 "lookup mac addr failed for cmd_send, ret =%d.\n", 4071 ret); 4072 return ret; 4073 } 4074 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; 4075 retval = le16_to_cpu(desc[0].retval); 4076 4077 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 4078 HCLGE_MAC_VLAN_LKUP); 4079 } 4080 4081 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, 4082 struct hclge_mac_vlan_tbl_entry_cmd *req, 4083 struct hclge_desc *mc_desc) 4084 { 4085 struct hclge_dev *hdev = vport->back; 4086 int cfg_status; 4087 u8 resp_code; 4088 u16 retval; 4089 int ret; 4090 4091 if (!mc_desc) { 4092 struct hclge_desc desc; 4093 4094 hclge_cmd_setup_basic_desc(&desc, 4095 HCLGE_OPC_MAC_VLAN_ADD, 4096 false); 4097 memcpy(desc.data, req, 4098 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4099 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4100 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 4101 retval = le16_to_cpu(desc.retval); 4102 4103 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 4104 resp_code, 4105 HCLGE_MAC_VLAN_ADD); 4106 } else { 4107 hclge_cmd_reuse_desc(&mc_desc[0], false); 4108 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4109 hclge_cmd_reuse_desc(&mc_desc[1], false); 4110 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4111 hclge_cmd_reuse_desc(&mc_desc[2], false); 4112 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); 4113 memcpy(mc_desc[0].data, req, 4114 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4115 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); 4116 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; 4117 retval = le16_to_cpu(mc_desc[0].retval); 4118 4119 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 4120 resp_code, 4121 HCLGE_MAC_VLAN_ADD); 4122 } 4123 4124 if (ret) { 4125 dev_err(&hdev->pdev->dev, 4126 "add mac addr failed for cmd_send, ret =%d.\n", 4127 ret); 4128 return ret; 4129 } 4130 4131 return cfg_status; 4132 } 4133 4134 static int hclge_add_uc_addr(struct hnae3_handle *handle, 4135 const unsigned char *addr) 4136 { 4137 struct hclge_vport *vport = hclge_get_vport(handle); 4138 4139 return hclge_add_uc_addr_common(vport, addr); 4140 } 4141 4142 int hclge_add_uc_addr_common(struct hclge_vport *vport, 4143 const unsigned char *addr) 4144 { 4145 struct hclge_dev *hdev = vport->back; 4146 struct hclge_mac_vlan_tbl_entry_cmd req; 4147 struct hclge_desc desc; 4148 u16 egress_port = 0; 4149 int ret; 4150 4151 /* mac addr check */ 4152 if (is_zero_ether_addr(addr) || 4153 is_broadcast_ether_addr(addr) || 4154 is_multicast_ether_addr(addr)) { 4155 dev_err(&hdev->pdev->dev, 4156 "Set_uc mac err! invalid mac:%pM. 
is_zero:%d,is_br=%d,is_mul=%d\n", 4157 addr, 4158 is_zero_ether_addr(addr), 4159 is_broadcast_ether_addr(addr), 4160 is_multicast_ether_addr(addr)); 4161 return -EINVAL; 4162 } 4163 4164 memset(&req, 0, sizeof(req)); 4165 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4166 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4167 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0); 4168 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4169 4170 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0); 4171 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0); 4172 hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, 4173 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); 4174 hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M, 4175 HCLGE_MAC_EPORT_PFID_S, 0); 4176 4177 req.egress_port = cpu_to_le16(egress_port); 4178 4179 hclge_prepare_mac_addr(&req, addr); 4180 4181 /* Lookup the mac address in the mac_vlan table, and add 4182 * it if the entry is inexistent. Repeated unicast entry 4183 * is not allowed in the mac vlan table. 4184 */ 4185 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); 4186 if (ret == -ENOENT) 4187 return hclge_add_mac_vlan_tbl(vport, &req, NULL); 4188 4189 /* check if we just hit the duplicate */ 4190 if (!ret) 4191 ret = -EINVAL; 4192 4193 dev_err(&hdev->pdev->dev, 4194 "PF failed to add unicast entry(%pM) in the MAC table\n", 4195 addr); 4196 4197 return ret; 4198 } 4199 4200 static int hclge_rm_uc_addr(struct hnae3_handle *handle, 4201 const unsigned char *addr) 4202 { 4203 struct hclge_vport *vport = hclge_get_vport(handle); 4204 4205 return hclge_rm_uc_addr_common(vport, addr); 4206 } 4207 4208 int hclge_rm_uc_addr_common(struct hclge_vport *vport, 4209 const unsigned char *addr) 4210 { 4211 struct hclge_dev *hdev = vport->back; 4212 struct hclge_mac_vlan_tbl_entry_cmd req; 4213 int ret; 4214 4215 /* mac addr check */ 4216 if (is_zero_ether_addr(addr) || 4217 is_broadcast_ether_addr(addr) || 4218 is_multicast_ether_addr(addr)) { 4219 dev_dbg(&hdev->pdev->dev, 4220 "Remove mac err! invalid mac:%pM.\n", 4221 addr); 4222 return -EINVAL; 4223 } 4224 4225 memset(&req, 0, sizeof(req)); 4226 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4227 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4228 hclge_prepare_mac_addr(&req, addr); 4229 ret = hclge_remove_mac_vlan_tbl(vport, &req); 4230 4231 return ret; 4232 } 4233 4234 static int hclge_add_mc_addr(struct hnae3_handle *handle, 4235 const unsigned char *addr) 4236 { 4237 struct hclge_vport *vport = hclge_get_vport(handle); 4238 4239 return hclge_add_mc_addr_common(vport, addr); 4240 } 4241 4242 int hclge_add_mc_addr_common(struct hclge_vport *vport, 4243 const unsigned char *addr) 4244 { 4245 struct hclge_dev *hdev = vport->back; 4246 struct hclge_mac_vlan_tbl_entry_cmd req; 4247 struct hclge_desc desc[3]; 4248 u16 tbl_idx; 4249 int status; 4250 4251 /* mac addr check */ 4252 if (!is_multicast_ether_addr(addr)) { 4253 dev_err(&hdev->pdev->dev, 4254 "Add mc mac err! 
invalid mac:%pM.\n", 4255 addr); 4256 return -EINVAL; 4257 } 4258 memset(&req, 0, sizeof(req)); 4259 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4260 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4261 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 4262 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4263 hclge_prepare_mac_addr(&req, addr); 4264 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 4265 if (!status) { 4266 /* This mac addr exist, update VFID for it */ 4267 hclge_update_desc_vfid(desc, vport->vport_id, false); 4268 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 4269 } else { 4270 /* This mac addr do not exist, add new entry for it */ 4271 memset(desc[0].data, 0, sizeof(desc[0].data)); 4272 memset(desc[1].data, 0, sizeof(desc[0].data)); 4273 memset(desc[2].data, 0, sizeof(desc[0].data)); 4274 hclge_update_desc_vfid(desc, vport->vport_id, false); 4275 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 4276 } 4277 4278 /* Set MTA table for this MAC address */ 4279 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); 4280 status = hclge_set_mta_table_item(vport, tbl_idx, true); 4281 4282 return status; 4283 } 4284 4285 static int hclge_rm_mc_addr(struct hnae3_handle *handle, 4286 const unsigned char *addr) 4287 { 4288 struct hclge_vport *vport = hclge_get_vport(handle); 4289 4290 return hclge_rm_mc_addr_common(vport, addr); 4291 } 4292 4293 int hclge_rm_mc_addr_common(struct hclge_vport *vport, 4294 const unsigned char *addr) 4295 { 4296 struct hclge_dev *hdev = vport->back; 4297 struct hclge_mac_vlan_tbl_entry_cmd req; 4298 enum hclge_cmd_status status; 4299 struct hclge_desc desc[3]; 4300 u16 tbl_idx; 4301 4302 /* mac addr check */ 4303 if (!is_multicast_ether_addr(addr)) { 4304 dev_dbg(&hdev->pdev->dev, 4305 "Remove mc mac err! 
invalid mac:%pM.\n", 4306 addr); 4307 return -EINVAL; 4308 } 4309 4310 memset(&req, 0, sizeof(req)); 4311 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4312 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4313 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 4314 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4315 hclge_prepare_mac_addr(&req, addr); 4316 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 4317 if (!status) { 4318 /* This mac addr exist, remove this handle's VFID for it */ 4319 hclge_update_desc_vfid(desc, vport->vport_id, true); 4320 4321 if (hclge_is_all_function_id_zero(desc)) 4322 /* All the vfid is zero, so need to delete this entry */ 4323 status = hclge_remove_mac_vlan_tbl(vport, &req); 4324 else 4325 /* Not all the vfid is zero, update the vfid */ 4326 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 4327 4328 } else { 4329 /* This mac addr do not exist, can't delete it */ 4330 dev_err(&hdev->pdev->dev, 4331 "Rm multicast mac addr failed, ret = %d.\n", 4332 status); 4333 return -EIO; 4334 } 4335 4336 /* Set MTB table for this MAC address */ 4337 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); 4338 status = hclge_set_mta_table_item(vport, tbl_idx, false); 4339 4340 return status; 4341 } 4342 4343 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, 4344 u16 cmdq_resp, u8 resp_code) 4345 { 4346 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 4347 #define HCLGE_ETHERTYPE_ALREADY_ADD 1 4348 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 4349 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 4350 4351 int return_status; 4352 4353 if (cmdq_resp) { 4354 dev_err(&hdev->pdev->dev, 4355 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", 4356 cmdq_resp); 4357 return -EIO; 4358 } 4359 4360 switch (resp_code) { 4361 case HCLGE_ETHERTYPE_SUCCESS_ADD: 4362 case HCLGE_ETHERTYPE_ALREADY_ADD: 4363 return_status = 0; 4364 break; 4365 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: 4366 dev_err(&hdev->pdev->dev, 4367 "add mac ethertype failed for manager table overflow.\n"); 4368 return_status = -EIO; 4369 break; 4370 case HCLGE_ETHERTYPE_KEY_CONFLICT: 4371 dev_err(&hdev->pdev->dev, 4372 "add mac ethertype failed for key conflict.\n"); 4373 return_status = -EIO; 4374 break; 4375 default: 4376 dev_err(&hdev->pdev->dev, 4377 "add mac ethertype failed for undefined, code=%d.\n", 4378 resp_code); 4379 return_status = -EIO; 4380 } 4381 4382 return return_status; 4383 } 4384 4385 static int hclge_add_mgr_tbl(struct hclge_dev *hdev, 4386 const struct hclge_mac_mgr_tbl_entry_cmd *req) 4387 { 4388 struct hclge_desc desc; 4389 u8 resp_code; 4390 u16 retval; 4391 int ret; 4392 4393 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); 4394 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); 4395 4396 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4397 if (ret) { 4398 dev_err(&hdev->pdev->dev, 4399 "add mac ethertype failed for cmd_send, ret =%d.\n", 4400 ret); 4401 return ret; 4402 } 4403 4404 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 4405 retval = le16_to_cpu(desc.retval); 4406 4407 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); 4408 } 4409 4410 static int init_mgr_tbl(struct hclge_dev *hdev) 4411 { 4412 int ret; 4413 int i; 4414 4415 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { 4416 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); 4417 if (ret) { 4418 dev_err(&hdev->pdev->dev, 4419 "add mac ethertype failed, ret =%d.\n", 4420 ret); 4421 return ret; 4422 } 4423 } 
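/* All entries of the static hclge_mgr_table are now programmed; an entry
 * that already exists is reported as HCLGE_ETHERTYPE_ALREADY_ADD and is
 * treated as success by hclge_get_mac_ethertype_cmd_status().
 */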
4424 4425 return 0; 4426 } 4427 4428 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) 4429 { 4430 struct hclge_vport *vport = hclge_get_vport(handle); 4431 struct hclge_dev *hdev = vport->back; 4432 4433 ether_addr_copy(p, hdev->hw.mac.mac_addr); 4434 } 4435 4436 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, 4437 bool is_first) 4438 { 4439 const unsigned char *new_addr = (const unsigned char *)p; 4440 struct hclge_vport *vport = hclge_get_vport(handle); 4441 struct hclge_dev *hdev = vport->back; 4442 int ret; 4443 4444 /* mac addr check */ 4445 if (is_zero_ether_addr(new_addr) || 4446 is_broadcast_ether_addr(new_addr) || 4447 is_multicast_ether_addr(new_addr)) { 4448 dev_err(&hdev->pdev->dev, 4449 "Change uc mac err! invalid mac:%p.\n", 4450 new_addr); 4451 return -EINVAL; 4452 } 4453 4454 if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) 4455 dev_warn(&hdev->pdev->dev, 4456 "remove old uc mac address fail.\n"); 4457 4458 ret = hclge_add_uc_addr(handle, new_addr); 4459 if (ret) { 4460 dev_err(&hdev->pdev->dev, 4461 "add uc mac address fail, ret =%d.\n", 4462 ret); 4463 4464 if (!is_first && 4465 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) 4466 dev_err(&hdev->pdev->dev, 4467 "restore uc mac address fail.\n"); 4468 4469 return -EIO; 4470 } 4471 4472 ret = hclge_pause_addr_cfg(hdev, new_addr); 4473 if (ret) { 4474 dev_err(&hdev->pdev->dev, 4475 "configure mac pause address fail, ret =%d.\n", 4476 ret); 4477 return -EIO; 4478 } 4479 4480 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); 4481 4482 return 0; 4483 } 4484 4485 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, 4486 bool filter_en) 4487 { 4488 struct hclge_vlan_filter_ctrl_cmd *req; 4489 struct hclge_desc desc; 4490 int ret; 4491 4492 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); 4493 4494 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; 4495 req->vlan_type = vlan_type; 4496 req->vlan_fe = filter_en; 4497 4498 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4499 if (ret) { 4500 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", 4501 ret); 4502 return ret; 4503 } 4504 4505 return 0; 4506 } 4507 4508 #define HCLGE_FILTER_TYPE_VF 0 4509 #define HCLGE_FILTER_TYPE_PORT 1 4510 4511 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 4512 { 4513 struct hclge_vport *vport = hclge_get_vport(handle); 4514 struct hclge_dev *hdev = vport->back; 4515 4516 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable); 4517 } 4518 4519 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, 4520 bool is_kill, u16 vlan, u8 qos, 4521 __be16 proto) 4522 { 4523 #define HCLGE_MAX_VF_BYTES 16 4524 struct hclge_vlan_filter_vf_cfg_cmd *req0; 4525 struct hclge_vlan_filter_vf_cfg_cmd *req1; 4526 struct hclge_desc desc[2]; 4527 u8 vf_byte_val; 4528 u8 vf_byte_off; 4529 int ret; 4530 4531 hclge_cmd_setup_basic_desc(&desc[0], 4532 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 4533 hclge_cmd_setup_basic_desc(&desc[1], 4534 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 4535 4536 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4537 4538 vf_byte_off = vfid / 8; 4539 vf_byte_val = 1 << (vfid % 8); 4540 4541 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; 4542 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; 4543 4544 req0->vlan_id = cpu_to_le16(vlan); 4545 req0->vlan_cfg = is_kill; 4546 4547 if (vf_byte_off < HCLGE_MAX_VF_BYTES) 4548 req0->vf_bitmap[vf_byte_off] = vf_byte_val; 4549 else 4550 
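/* Each descriptor carries HCLGE_MAX_VF_BYTES (16) bytes of VF bitmap,
 * so VF ids of 128 and above fall into the second descriptor.
 */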
req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; 4551 4552 ret = hclge_cmd_send(&hdev->hw, desc, 2); 4553 if (ret) { 4554 dev_err(&hdev->pdev->dev, 4555 "Send vf vlan command fail, ret =%d.\n", 4556 ret); 4557 return ret; 4558 } 4559 4560 if (!is_kill) { 4561 if (!req0->resp_code || req0->resp_code == 1) 4562 return 0; 4563 4564 dev_err(&hdev->pdev->dev, 4565 "Add vf vlan filter fail, ret =%d.\n", 4566 req0->resp_code); 4567 } else { 4568 if (!req0->resp_code) 4569 return 0; 4570 4571 dev_err(&hdev->pdev->dev, 4572 "Kill vf vlan filter fail, ret =%d.\n", 4573 req0->resp_code); 4574 } 4575 4576 return -EIO; 4577 } 4578 4579 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, 4580 u16 vlan_id, bool is_kill) 4581 { 4582 struct hclge_vlan_filter_pf_cfg_cmd *req; 4583 struct hclge_desc desc; 4584 u8 vlan_offset_byte_val; 4585 u8 vlan_offset_byte; 4586 u8 vlan_offset_160; 4587 int ret; 4588 4589 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); 4590 4591 vlan_offset_160 = vlan_id / 160; 4592 vlan_offset_byte = (vlan_id % 160) / 8; 4593 vlan_offset_byte_val = 1 << (vlan_id % 8); 4594 4595 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; 4596 req->vlan_offset = vlan_offset_160; 4597 req->vlan_cfg = is_kill; 4598 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 4599 4600 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4601 if (ret) 4602 dev_err(&hdev->pdev->dev, 4603 "port vlan command, send fail, ret =%d.\n", ret); 4604 return ret; 4605 } 4606 4607 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, 4608 u16 vport_id, u16 vlan_id, u8 qos, 4609 bool is_kill) 4610 { 4611 u16 vport_idx, vport_num = 0; 4612 int ret; 4613 4614 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, 4615 0, proto); 4616 if (ret) { 4617 dev_err(&hdev->pdev->dev, 4618 "Set %d vport vlan filter config fail, ret =%d.\n", 4619 vport_id, ret); 4620 return ret; 4621 } 4622 4623 /* vlan 0 may be added twice when 8021q module is enabled */ 4624 if (!is_kill && !vlan_id && 4625 test_bit(vport_id, hdev->vlan_table[vlan_id])) 4626 return 0; 4627 4628 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { 4629 dev_err(&hdev->pdev->dev, 4630 "Add port vlan failed, vport %d is already in vlan %d\n", 4631 vport_id, vlan_id); 4632 return -EINVAL; 4633 } 4634 4635 if (is_kill && 4636 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { 4637 dev_err(&hdev->pdev->dev, 4638 "Delete port vlan failed, vport %d is not in vlan %d\n", 4639 vport_id, vlan_id); 4640 return -EINVAL; 4641 } 4642 4643 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID) 4644 vport_num++; 4645 4646 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) 4647 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, 4648 is_kill); 4649 4650 return ret; 4651 } 4652 4653 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, 4654 u16 vlan_id, bool is_kill) 4655 { 4656 struct hclge_vport *vport = hclge_get_vport(handle); 4657 struct hclge_dev *hdev = vport->back; 4658 4659 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, 4660 0, is_kill); 4661 } 4662 4663 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, 4664 u16 vlan, u8 qos, __be16 proto) 4665 { 4666 struct hclge_vport *vport = hclge_get_vport(handle); 4667 struct hclge_dev *hdev = vport->back; 4668 4669 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) 4670 return -EINVAL; 4671 if (proto != 
htons(ETH_P_8021Q)) 4672 return -EPROTONOSUPPORT; 4673 4674 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); 4675 } 4676 4677 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) 4678 { 4679 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; 4680 struct hclge_vport_vtag_tx_cfg_cmd *req; 4681 struct hclge_dev *hdev = vport->back; 4682 struct hclge_desc desc; 4683 int status; 4684 4685 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); 4686 4687 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; 4688 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); 4689 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); 4690 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B, 4691 vcfg->accept_tag ? 1 : 0); 4692 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B, 4693 vcfg->accept_untag ? 1 : 0); 4694 hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, 4695 vcfg->insert_tag1_en ? 1 : 0); 4696 hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, 4697 vcfg->insert_tag2_en ? 1 : 0); 4698 hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); 4699 4700 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 4701 req->vf_bitmap[req->vf_offset] = 4702 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 4703 4704 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4705 if (status) 4706 dev_err(&hdev->pdev->dev, 4707 "Send port txvlan cfg command fail, ret =%d\n", 4708 status); 4709 4710 return status; 4711 } 4712 4713 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) 4714 { 4715 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; 4716 struct hclge_vport_vtag_rx_cfg_cmd *req; 4717 struct hclge_dev *hdev = vport->back; 4718 struct hclge_desc desc; 4719 int status; 4720 4721 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); 4722 4723 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; 4724 hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, 4725 vcfg->strip_tag1_en ? 1 : 0); 4726 hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, 4727 vcfg->strip_tag2_en ? 1 : 0); 4728 hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, 4729 vcfg->vlan1_vlan_prionly ? 1 : 0); 4730 hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, 4731 vcfg->vlan2_vlan_prionly ? 
1 : 0); 4732 4733 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 4734 req->vf_bitmap[req->vf_offset] = 4735 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 4736 4737 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4738 if (status) 4739 dev_err(&hdev->pdev->dev, 4740 "Send port rxvlan cfg command fail, ret =%d\n", 4741 status); 4742 4743 return status; 4744 } 4745 4746 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) 4747 { 4748 struct hclge_rx_vlan_type_cfg_cmd *rx_req; 4749 struct hclge_tx_vlan_type_cfg_cmd *tx_req; 4750 struct hclge_desc desc; 4751 int status; 4752 4753 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); 4754 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; 4755 rx_req->ot_fst_vlan_type = 4756 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); 4757 rx_req->ot_sec_vlan_type = 4758 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); 4759 rx_req->in_fst_vlan_type = 4760 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); 4761 rx_req->in_sec_vlan_type = 4762 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); 4763 4764 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4765 if (status) { 4766 dev_err(&hdev->pdev->dev, 4767 "Send rxvlan protocol type command fail, ret =%d\n", 4768 status); 4769 return status; 4770 } 4771 4772 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); 4773 4774 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data; 4775 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); 4776 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); 4777 4778 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4779 if (status) 4780 dev_err(&hdev->pdev->dev, 4781 "Send txvlan protocol type command fail, ret =%d\n", 4782 status); 4783 4784 return status; 4785 } 4786 4787 static int hclge_init_vlan_config(struct hclge_dev *hdev) 4788 { 4789 #define HCLGE_DEF_VLAN_TYPE 0x8100 4790 4791 struct hnae3_handle *handle; 4792 struct hclge_vport *vport; 4793 int ret; 4794 int i; 4795 4796 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true); 4797 if (ret) 4798 return ret; 4799 4800 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true); 4801 if (ret) 4802 return ret; 4803 4804 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; 4805 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; 4806 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; 4807 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; 4808 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; 4809 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; 4810 4811 ret = hclge_set_vlan_protocol_type(hdev); 4812 if (ret) 4813 return ret; 4814 4815 for (i = 0; i < hdev->num_alloc_vport; i++) { 4816 vport = &hdev->vport[i]; 4817 vport->txvlan_cfg.accept_tag = true; 4818 vport->txvlan_cfg.accept_untag = true; 4819 vport->txvlan_cfg.insert_tag1_en = false; 4820 vport->txvlan_cfg.insert_tag2_en = false; 4821 vport->txvlan_cfg.default_tag1 = 0; 4822 vport->txvlan_cfg.default_tag2 = 0; 4823 4824 ret = hclge_set_vlan_tx_offload_cfg(vport); 4825 if (ret) 4826 return ret; 4827 4828 vport->rxvlan_cfg.strip_tag1_en = false; 4829 vport->rxvlan_cfg.strip_tag2_en = true; 4830 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 4831 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 4832 4833 ret = hclge_set_vlan_rx_offload_cfg(vport); 4834 if (ret) 4835 return ret; 4836 } 4837 4838 handle = &hdev->vport[0].nic; 4839 return hclge_set_vlan_filter(handle, 
htons(ETH_P_8021Q), 0, false); 4840 } 4841 4842 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 4843 { 4844 struct hclge_vport *vport = hclge_get_vport(handle); 4845 4846 vport->rxvlan_cfg.strip_tag1_en = false; 4847 vport->rxvlan_cfg.strip_tag2_en = enable; 4848 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 4849 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 4850 4851 return hclge_set_vlan_rx_offload_cfg(vport); 4852 } 4853 4854 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) 4855 { 4856 struct hclge_config_max_frm_size_cmd *req; 4857 struct hclge_desc desc; 4858 int max_frm_size; 4859 int ret; 4860 4861 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 4862 4863 if (max_frm_size < HCLGE_MAC_MIN_FRAME || 4864 max_frm_size > HCLGE_MAC_MAX_FRAME) 4865 return -EINVAL; 4866 4867 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); 4868 4869 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); 4870 4871 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 4872 req->max_frm_size = cpu_to_le16(max_frm_size); 4873 4874 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4875 if (ret) { 4876 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); 4877 return ret; 4878 } 4879 4880 hdev->mps = max_frm_size; 4881 4882 return 0; 4883 } 4884 4885 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) 4886 { 4887 struct hclge_vport *vport = hclge_get_vport(handle); 4888 struct hclge_dev *hdev = vport->back; 4889 int ret; 4890 4891 ret = hclge_set_mac_mtu(hdev, new_mtu); 4892 if (ret) { 4893 dev_err(&hdev->pdev->dev, 4894 "Change mtu fail, ret =%d\n", ret); 4895 return ret; 4896 } 4897 4898 ret = hclge_buffer_alloc(hdev); 4899 if (ret) 4900 dev_err(&hdev->pdev->dev, 4901 "Allocate buffer fail, ret =%d\n", ret); 4902 4903 return ret; 4904 } 4905 4906 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, 4907 bool enable) 4908 { 4909 struct hclge_reset_tqp_queue_cmd *req; 4910 struct hclge_desc desc; 4911 int ret; 4912 4913 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); 4914 4915 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 4916 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 4917 hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); 4918 4919 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4920 if (ret) { 4921 dev_err(&hdev->pdev->dev, 4922 "Send tqp reset cmd error, status =%d\n", ret); 4923 return ret; 4924 } 4925 4926 return 0; 4927 } 4928 4929 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) 4930 { 4931 struct hclge_reset_tqp_queue_cmd *req; 4932 struct hclge_desc desc; 4933 int ret; 4934 4935 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); 4936 4937 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 4938 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 4939 4940 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4941 if (ret) { 4942 dev_err(&hdev->pdev->dev, 4943 "Get reset status error, status =%d\n", ret); 4944 return ret; 4945 } 4946 4947 return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); 4948 } 4949 4950 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, 4951 u16 queue_id) 4952 { 4953 struct hnae3_queue *queue; 4954 struct hclge_tqp *tqp; 4955 4956 queue = handle->kinfo.tqp[queue_id]; 4957 tqp = container_of(queue, struct hclge_tqp, q); 4958 4959 return tqp->index; 4960 } 4961 4962 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 4963 { 4964 struct hclge_vport 
*vport = hclge_get_vport(handle); 4965 struct hclge_dev *hdev = vport->back; 4966 int reset_try_times = 0; 4967 int reset_status; 4968 u16 queue_gid; 4969 int ret; 4970 4971 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 4972 return; 4973 4974 queue_gid = hclge_covert_handle_qid_global(handle, queue_id); 4975 4976 ret = hclge_tqp_enable(hdev, queue_id, 0, false); 4977 if (ret) { 4978 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); 4979 return; 4980 } 4981 4982 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 4983 if (ret) { 4984 dev_warn(&hdev->pdev->dev, 4985 "Send reset tqp cmd fail, ret = %d\n", ret); 4986 return; 4987 } 4988 4989 reset_try_times = 0; 4990 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 4991 /* Wait for tqp hw reset */ 4992 msleep(20); 4993 reset_status = hclge_get_reset_status(hdev, queue_gid); 4994 if (reset_status) 4995 break; 4996 } 4997 4998 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 4999 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); 5000 return; 5001 } 5002 5003 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 5004 if (ret) { 5005 dev_warn(&hdev->pdev->dev, 5006 "Deassert the soft reset fail, ret = %d\n", ret); 5007 return; 5008 } 5009 } 5010 5011 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) 5012 { 5013 struct hclge_dev *hdev = vport->back; 5014 int reset_try_times = 0; 5015 int reset_status; 5016 u16 queue_gid; 5017 int ret; 5018 5019 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); 5020 5021 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 5022 if (ret) { 5023 dev_warn(&hdev->pdev->dev, 5024 "Send reset tqp cmd fail, ret = %d\n", ret); 5025 return; 5026 } 5027 5028 reset_try_times = 0; 5029 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 5030 /* Wait for tqp hw reset */ 5031 msleep(20); 5032 reset_status = hclge_get_reset_status(hdev, queue_gid); 5033 if (reset_status) 5034 break; 5035 } 5036 5037 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 5038 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); 5039 return; 5040 } 5041 5042 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 5043 if (ret) 5044 dev_warn(&hdev->pdev->dev, 5045 "Deassert the soft reset fail, ret = %d\n", ret); 5046 } 5047 5048 static u32 hclge_get_fw_version(struct hnae3_handle *handle) 5049 { 5050 struct hclge_vport *vport = hclge_get_vport(handle); 5051 struct hclge_dev *hdev = vport->back; 5052 5053 return hdev->fw_version; 5054 } 5055 5056 static void hclge_get_flowctrl_adv(struct hnae3_handle *handle, 5057 u32 *flowctrl_adv) 5058 { 5059 struct hclge_vport *vport = hclge_get_vport(handle); 5060 struct hclge_dev *hdev = vport->back; 5061 struct phy_device *phydev = hdev->hw.mac.phydev; 5062 5063 if (!phydev) 5064 return; 5065 5066 *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) | 5067 (phydev->advertising & ADVERTISED_Asym_Pause); 5068 } 5069 5070 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 5071 { 5072 struct phy_device *phydev = hdev->hw.mac.phydev; 5073 5074 if (!phydev) 5075 return; 5076 5077 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); 5078 5079 if (rx_en) 5080 phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; 5081 5082 if (tx_en) 5083 phydev->advertising ^= ADVERTISED_Asym_Pause; 5084 } 5085 5086 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 5087 { 5088 int ret; 5089 5090 if (rx_en && tx_en) 5091 hdev->fc_mode_last_time = HCLGE_FC_FULL; 5092 else if (rx_en 
&& !tx_en) 5093 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; 5094 else if (!rx_en && tx_en) 5095 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; 5096 else 5097 hdev->fc_mode_last_time = HCLGE_FC_NONE; 5098 5099 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) 5100 return 0; 5101 5102 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); 5103 if (ret) { 5104 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", 5105 ret); 5106 return ret; 5107 } 5108 5109 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; 5110 5111 return 0; 5112 } 5113 5114 int hclge_cfg_flowctrl(struct hclge_dev *hdev) 5115 { 5116 struct phy_device *phydev = hdev->hw.mac.phydev; 5117 u16 remote_advertising = 0; 5118 u16 local_advertising = 0; 5119 u32 rx_pause, tx_pause; 5120 u8 flowctl; 5121 5122 if (!phydev->link || !phydev->autoneg) 5123 return 0; 5124 5125 if (phydev->advertising & ADVERTISED_Pause) 5126 local_advertising = ADVERTISE_PAUSE_CAP; 5127 5128 if (phydev->advertising & ADVERTISED_Asym_Pause) 5129 local_advertising |= ADVERTISE_PAUSE_ASYM; 5130 5131 if (phydev->pause) 5132 remote_advertising = LPA_PAUSE_CAP; 5133 5134 if (phydev->asym_pause) 5135 remote_advertising |= LPA_PAUSE_ASYM; 5136 5137 flowctl = mii_resolve_flowctrl_fdx(local_advertising, 5138 remote_advertising); 5139 tx_pause = flowctl & FLOW_CTRL_TX; 5140 rx_pause = flowctl & FLOW_CTRL_RX; 5141 5142 if (phydev->duplex == HCLGE_MAC_HALF) { 5143 tx_pause = 0; 5144 rx_pause = 0; 5145 } 5146 5147 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); 5148 } 5149 5150 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, 5151 u32 *rx_en, u32 *tx_en) 5152 { 5153 struct hclge_vport *vport = hclge_get_vport(handle); 5154 struct hclge_dev *hdev = vport->back; 5155 5156 *auto_neg = hclge_get_autoneg(handle); 5157 5158 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 5159 *rx_en = 0; 5160 *tx_en = 0; 5161 return; 5162 } 5163 5164 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { 5165 *rx_en = 1; 5166 *tx_en = 0; 5167 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { 5168 *tx_en = 1; 5169 *rx_en = 0; 5170 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { 5171 *rx_en = 1; 5172 *tx_en = 1; 5173 } else { 5174 *rx_en = 0; 5175 *tx_en = 0; 5176 } 5177 } 5178 5179 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, 5180 u32 rx_en, u32 tx_en) 5181 { 5182 struct hclge_vport *vport = hclge_get_vport(handle); 5183 struct hclge_dev *hdev = vport->back; 5184 struct phy_device *phydev = hdev->hw.mac.phydev; 5185 u32 fc_autoneg; 5186 5187 fc_autoneg = hclge_get_autoneg(handle); 5188 if (auto_neg != fc_autoneg) { 5189 dev_info(&hdev->pdev->dev, 5190 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); 5191 return -EOPNOTSUPP; 5192 } 5193 5194 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 5195 dev_info(&hdev->pdev->dev, 5196 "Priority flow control enabled. Cannot set link flow control.\n"); 5197 return -EOPNOTSUPP; 5198 } 5199 5200 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); 5201 5202 if (!fc_autoneg) 5203 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); 5204 5205 /* Only support flow control negotiation for netdev with 5206 * phy attached for now. 
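 * When autoneg is enabled, restart PHY autonegotiation so that the
 * pause bits set by hclge_set_flowctrl_adv() above are re-advertised
 * to the link partner.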
5207 */ 5208 if (!phydev) 5209 return -EOPNOTSUPP; 5210 5211 return phy_start_aneg(phydev); 5212 } 5213 5214 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, 5215 u8 *auto_neg, u32 *speed, u8 *duplex) 5216 { 5217 struct hclge_vport *vport = hclge_get_vport(handle); 5218 struct hclge_dev *hdev = vport->back; 5219 5220 if (speed) 5221 *speed = hdev->hw.mac.speed; 5222 if (duplex) 5223 *duplex = hdev->hw.mac.duplex; 5224 if (auto_neg) 5225 *auto_neg = hdev->hw.mac.autoneg; 5226 } 5227 5228 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) 5229 { 5230 struct hclge_vport *vport = hclge_get_vport(handle); 5231 struct hclge_dev *hdev = vport->back; 5232 5233 if (media_type) 5234 *media_type = hdev->hw.mac.media_type; 5235 } 5236 5237 static void hclge_get_mdix_mode(struct hnae3_handle *handle, 5238 u8 *tp_mdix_ctrl, u8 *tp_mdix) 5239 { 5240 struct hclge_vport *vport = hclge_get_vport(handle); 5241 struct hclge_dev *hdev = vport->back; 5242 struct phy_device *phydev = hdev->hw.mac.phydev; 5243 int mdix_ctrl, mdix, retval, is_resolved; 5244 5245 if (!phydev) { 5246 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 5247 *tp_mdix = ETH_TP_MDI_INVALID; 5248 return; 5249 } 5250 5251 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); 5252 5253 retval = phy_read(phydev, HCLGE_PHY_CSC_REG); 5254 mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, 5255 HCLGE_PHY_MDIX_CTRL_S); 5256 5257 retval = phy_read(phydev, HCLGE_PHY_CSS_REG); 5258 mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); 5259 is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); 5260 5261 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); 5262 5263 switch (mdix_ctrl) { 5264 case 0x0: 5265 *tp_mdix_ctrl = ETH_TP_MDI; 5266 break; 5267 case 0x1: 5268 *tp_mdix_ctrl = ETH_TP_MDI_X; 5269 break; 5270 case 0x3: 5271 *tp_mdix_ctrl = ETH_TP_MDI_AUTO; 5272 break; 5273 default: 5274 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 5275 break; 5276 } 5277 5278 if (!is_resolved) 5279 *tp_mdix = ETH_TP_MDI_INVALID; 5280 else if (mdix) 5281 *tp_mdix = ETH_TP_MDI_X; 5282 else 5283 *tp_mdix = ETH_TP_MDI; 5284 } 5285 5286 static int hclge_init_client_instance(struct hnae3_client *client, 5287 struct hnae3_ae_dev *ae_dev) 5288 { 5289 struct hclge_dev *hdev = ae_dev->priv; 5290 struct hclge_vport *vport; 5291 int i, ret; 5292 5293 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 5294 vport = &hdev->vport[i]; 5295 5296 switch (client->type) { 5297 case HNAE3_CLIENT_KNIC: 5298 5299 hdev->nic_client = client; 5300 vport->nic.client = client; 5301 ret = client->ops->init_instance(&vport->nic); 5302 if (ret) 5303 return ret; 5304 5305 if (hdev->roce_client && 5306 hnae3_dev_roce_supported(hdev)) { 5307 struct hnae3_client *rc = hdev->roce_client; 5308 5309 ret = hclge_init_roce_base_info(vport); 5310 if (ret) 5311 return ret; 5312 5313 ret = rc->ops->init_instance(&vport->roce); 5314 if (ret) 5315 return ret; 5316 } 5317 5318 break; 5319 case HNAE3_CLIENT_UNIC: 5320 hdev->nic_client = client; 5321 vport->nic.client = client; 5322 5323 ret = client->ops->init_instance(&vport->nic); 5324 if (ret) 5325 return ret; 5326 5327 break; 5328 case HNAE3_CLIENT_ROCE: 5329 if (hnae3_dev_roce_supported(hdev)) { 5330 hdev->roce_client = client; 5331 vport->roce.client = client; 5332 } 5333 5334 if (hdev->roce_client && hdev->nic_client) { 5335 ret = hclge_init_roce_base_info(vport); 5336 if (ret) 5337 return ret; 5338 5339 ret = client->ops->init_instance(&vport->roce); 5340 if (ret) 5341 return ret; 5342 } 5343 } 
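/* The same client instance is registered on the PF vport and on every
 * VMDq vport; on a RoCE-capable device the RoCE instance is only
 * initialized once both the NIC and RoCE clients have been registered.
 */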
5344 } 5345 5346 return 0; 5347 } 5348 5349 static void hclge_uninit_client_instance(struct hnae3_client *client, 5350 struct hnae3_ae_dev *ae_dev) 5351 { 5352 struct hclge_dev *hdev = ae_dev->priv; 5353 struct hclge_vport *vport; 5354 int i; 5355 5356 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 5357 vport = &hdev->vport[i]; 5358 if (hdev->roce_client) { 5359 hdev->roce_client->ops->uninit_instance(&vport->roce, 5360 0); 5361 hdev->roce_client = NULL; 5362 vport->roce.client = NULL; 5363 } 5364 if (client->type == HNAE3_CLIENT_ROCE) 5365 return; 5366 if (client->ops->uninit_instance) { 5367 client->ops->uninit_instance(&vport->nic, 0); 5368 hdev->nic_client = NULL; 5369 vport->nic.client = NULL; 5370 } 5371 } 5372 } 5373 5374 static int hclge_pci_init(struct hclge_dev *hdev) 5375 { 5376 struct pci_dev *pdev = hdev->pdev; 5377 struct hclge_hw *hw; 5378 int ret; 5379 5380 ret = pci_enable_device(pdev); 5381 if (ret) { 5382 dev_err(&pdev->dev, "failed to enable PCI device\n"); 5383 return ret; 5384 } 5385 5386 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 5387 if (ret) { 5388 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 5389 if (ret) { 5390 dev_err(&pdev->dev, 5391 "can't set consistent PCI DMA"); 5392 goto err_disable_device; 5393 } 5394 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); 5395 } 5396 5397 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); 5398 if (ret) { 5399 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 5400 goto err_disable_device; 5401 } 5402 5403 pci_set_master(pdev); 5404 hw = &hdev->hw; 5405 hw->back = hdev; 5406 hw->io_base = pcim_iomap(pdev, 2, 0); 5407 if (!hw->io_base) { 5408 dev_err(&pdev->dev, "Can't map configuration register space\n"); 5409 ret = -ENOMEM; 5410 goto err_clr_master; 5411 } 5412 5413 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); 5414 5415 return 0; 5416 err_clr_master: 5417 pci_clear_master(pdev); 5418 pci_release_regions(pdev); 5419 err_disable_device: 5420 pci_disable_device(pdev); 5421 5422 return ret; 5423 } 5424 5425 static void hclge_pci_uninit(struct hclge_dev *hdev) 5426 { 5427 struct pci_dev *pdev = hdev->pdev; 5428 5429 pcim_iounmap(pdev, hdev->hw.io_base); 5430 pci_free_irq_vectors(pdev); 5431 pci_clear_master(pdev); 5432 pci_release_mem_regions(pdev); 5433 pci_disable_device(pdev); 5434 } 5435 5436 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) 5437 { 5438 struct pci_dev *pdev = ae_dev->pdev; 5439 struct hclge_dev *hdev; 5440 int ret; 5441 5442 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 5443 if (!hdev) { 5444 ret = -ENOMEM; 5445 goto out; 5446 } 5447 5448 hdev->pdev = pdev; 5449 hdev->ae_dev = ae_dev; 5450 hdev->reset_type = HNAE3_NONE_RESET; 5451 hdev->reset_request = 0; 5452 hdev->reset_pending = 0; 5453 ae_dev->priv = hdev; 5454 5455 ret = hclge_pci_init(hdev); 5456 if (ret) { 5457 dev_err(&pdev->dev, "PCI init failed\n"); 5458 goto out; 5459 } 5460 5461 /* Firmware command queue initialize */ 5462 ret = hclge_cmd_queue_init(hdev); 5463 if (ret) { 5464 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); 5465 goto err_pci_uninit; 5466 } 5467 5468 /* Firmware command initialize */ 5469 ret = hclge_cmd_init(hdev); 5470 if (ret) 5471 goto err_cmd_uninit; 5472 5473 ret = hclge_get_cap(hdev); 5474 if (ret) { 5475 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 5476 ret); 5477 goto err_cmd_uninit; 5478 } 5479 5480 ret = hclge_configure(hdev); 5481 if (ret) { 5482 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 5483 
goto err_cmd_uninit; 5484 } 5485 5486 ret = hclge_init_msi(hdev); 5487 if (ret) { 5488 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); 5489 goto err_cmd_uninit; 5490 } 5491 5492 ret = hclge_misc_irq_init(hdev); 5493 if (ret) { 5494 dev_err(&pdev->dev, 5495 "Misc IRQ(vector0) init error, ret = %d.\n", 5496 ret); 5497 goto err_msi_uninit; 5498 } 5499 5500 ret = hclge_alloc_tqps(hdev); 5501 if (ret) { 5502 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); 5503 goto err_msi_irq_uninit; 5504 } 5505 5506 ret = hclge_alloc_vport(hdev); 5507 if (ret) { 5508 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); 5509 goto err_msi_irq_uninit; 5510 } 5511 5512 ret = hclge_map_tqp(hdev); 5513 if (ret) { 5514 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 5515 goto err_msi_irq_uninit; 5516 } 5517 5518 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { 5519 ret = hclge_mac_mdio_config(hdev); 5520 if (ret) { 5521 dev_err(&hdev->pdev->dev, 5522 "mdio config fail ret=%d\n", ret); 5523 goto err_msi_irq_uninit; 5524 } 5525 } 5526 5527 ret = hclge_mac_init(hdev); 5528 if (ret) { 5529 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 5530 goto err_mdiobus_unreg; 5531 } 5532 5533 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 5534 if (ret) { 5535 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 5536 goto err_mdiobus_unreg; 5537 } 5538 5539 ret = hclge_init_vlan_config(hdev); 5540 if (ret) { 5541 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 5542 goto err_mdiobus_unreg; 5543 } 5544 5545 ret = hclge_tm_schd_init(hdev); 5546 if (ret) { 5547 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); 5548 goto err_mdiobus_unreg; 5549 } 5550 5551 hclge_rss_init_cfg(hdev); 5552 ret = hclge_rss_init_hw(hdev); 5553 if (ret) { 5554 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 5555 goto err_mdiobus_unreg; 5556 } 5557 5558 ret = init_mgr_tbl(hdev); 5559 if (ret) { 5560 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); 5561 goto err_mdiobus_unreg; 5562 } 5563 5564 hclge_dcb_ops_set(hdev); 5565 5566 timer_setup(&hdev->service_timer, hclge_service_timer, 0); 5567 INIT_WORK(&hdev->service_task, hclge_service_task); 5568 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); 5569 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); 5570 5571 /* Enable MISC vector(vector0) */ 5572 hclge_enable_vector(&hdev->misc_vector, true); 5573 5574 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); 5575 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5576 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 5577 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 5578 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 5579 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 5580 5581 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); 5582 return 0; 5583 5584 err_mdiobus_unreg: 5585 if (hdev->hw.mac.phydev) 5586 mdiobus_unregister(hdev->hw.mac.mdio_bus); 5587 err_msi_irq_uninit: 5588 hclge_misc_irq_uninit(hdev); 5589 err_msi_uninit: 5590 pci_free_irq_vectors(pdev); 5591 err_cmd_uninit: 5592 hclge_destroy_cmd_queue(&hdev->hw); 5593 err_pci_uninit: 5594 pcim_iounmap(pdev, hdev->hw.io_base); 5595 pci_clear_master(pdev); 5596 pci_release_regions(pdev); 5597 pci_disable_device(pdev); 5598 out: 5599 return ret; 5600 } 5601 5602 static void hclge_stats_clear(struct hclge_dev *hdev) 5603 { 5604 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); 5605 } 5606 5607 static int hclge_reset_ae_dev(struct 
hnae3_ae_dev *ae_dev) 5608 { 5609 struct hclge_dev *hdev = ae_dev->priv; 5610 struct pci_dev *pdev = ae_dev->pdev; 5611 int ret; 5612 5613 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5614 5615 hclge_stats_clear(hdev); 5616 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); 5617 5618 ret = hclge_cmd_init(hdev); 5619 if (ret) { 5620 dev_err(&pdev->dev, "Cmd queue init failed\n"); 5621 return ret; 5622 } 5623 5624 ret = hclge_get_cap(hdev); 5625 if (ret) { 5626 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 5627 ret); 5628 return ret; 5629 } 5630 5631 ret = hclge_configure(hdev); 5632 if (ret) { 5633 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 5634 return ret; 5635 } 5636 5637 ret = hclge_map_tqp(hdev); 5638 if (ret) { 5639 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 5640 return ret; 5641 } 5642 5643 ret = hclge_mac_init(hdev); 5644 if (ret) { 5645 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 5646 return ret; 5647 } 5648 5649 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 5650 if (ret) { 5651 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 5652 return ret; 5653 } 5654 5655 ret = hclge_init_vlan_config(hdev); 5656 if (ret) { 5657 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 5658 return ret; 5659 } 5660 5661 ret = hclge_tm_init_hw(hdev); 5662 if (ret) { 5663 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); 5664 return ret; 5665 } 5666 5667 ret = hclge_rss_init_hw(hdev); 5668 if (ret) { 5669 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 5670 return ret; 5671 } 5672 5673 /* Enable MISC vector(vector0) */ 5674 hclge_enable_vector(&hdev->misc_vector, true); 5675 5676 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", 5677 HCLGE_DRIVER_NAME); 5678 5679 return 0; 5680 } 5681 5682 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 5683 { 5684 struct hclge_dev *hdev = ae_dev->priv; 5685 struct hclge_mac *mac = &hdev->hw.mac; 5686 5687 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5688 5689 if (hdev->service_timer.function) 5690 del_timer_sync(&hdev->service_timer); 5691 if (hdev->service_task.func) 5692 cancel_work_sync(&hdev->service_task); 5693 if (hdev->rst_service_task.func) 5694 cancel_work_sync(&hdev->rst_service_task); 5695 if (hdev->mbx_service_task.func) 5696 cancel_work_sync(&hdev->mbx_service_task); 5697 5698 if (mac->phydev) 5699 mdiobus_unregister(mac->mdio_bus); 5700 5701 /* Disable MISC vector(vector0) */ 5702 hclge_enable_vector(&hdev->misc_vector, false); 5703 hclge_destroy_cmd_queue(&hdev->hw); 5704 hclge_misc_irq_uninit(hdev); 5705 hclge_pci_uninit(hdev); 5706 ae_dev->priv = NULL; 5707 } 5708 5709 static u32 hclge_get_max_channels(struct hnae3_handle *handle) 5710 { 5711 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 5712 struct hclge_vport *vport = hclge_get_vport(handle); 5713 struct hclge_dev *hdev = vport->back; 5714 5715 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); 5716 } 5717 5718 static void hclge_get_channels(struct hnae3_handle *handle, 5719 struct ethtool_channels *ch) 5720 { 5721 struct hclge_vport *vport = hclge_get_vport(handle); 5722 5723 ch->max_combined = hclge_get_max_channels(handle); 5724 ch->other_count = 1; 5725 ch->max_other = 1; 5726 ch->combined_count = vport->alloc_tqps; 5727 } 5728 5729 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, 5730 u16 *free_tqps, u16 *max_rss_size) 5731 { 5732 struct hclge_vport *vport = hclge_get_vport(handle); 5733 struct hclge_dev *hdev = 
vport->back; 5734 u16 temp_tqps = 0; 5735 int i; 5736 5737 for (i = 0; i < hdev->num_tqps; i++) { 5738 if (!hdev->htqp[i].alloced) 5739 temp_tqps++; 5740 } 5741 *free_tqps = temp_tqps; 5742 *max_rss_size = hdev->rss_size_max; 5743 } 5744 5745 static void hclge_release_tqp(struct hclge_vport *vport) 5746 { 5747 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 5748 struct hclge_dev *hdev = vport->back; 5749 int i; 5750 5751 for (i = 0; i < kinfo->num_tqps; i++) { 5752 struct hclge_tqp *tqp = 5753 container_of(kinfo->tqp[i], struct hclge_tqp, q); 5754 5755 tqp->q.handle = NULL; 5756 tqp->q.tqp_index = 0; 5757 tqp->alloced = false; 5758 } 5759 5760 devm_kfree(&hdev->pdev->dev, kinfo->tqp); 5761 kinfo->tqp = NULL; 5762 } 5763 5764 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) 5765 { 5766 struct hclge_vport *vport = hclge_get_vport(handle); 5767 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 5768 struct hclge_dev *hdev = vport->back; 5769 int cur_rss_size = kinfo->rss_size; 5770 int cur_tqps = kinfo->num_tqps; 5771 u16 tc_offset[HCLGE_MAX_TC_NUM]; 5772 u16 tc_valid[HCLGE_MAX_TC_NUM]; 5773 u16 tc_size[HCLGE_MAX_TC_NUM]; 5774 u16 roundup_size; 5775 u32 *rss_indir; 5776 int ret, i; 5777 5778 hclge_release_tqp(vport); 5779 5780 ret = hclge_knic_setup(vport, new_tqps_num); 5781 if (ret) { 5782 dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); 5783 return ret; 5784 } 5785 5786 ret = hclge_map_tqp_to_vport(hdev, vport); 5787 if (ret) { 5788 dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); 5789 return ret; 5790 } 5791 5792 ret = hclge_tm_schd_init(hdev); 5793 if (ret) { 5794 dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); 5795 return ret; 5796 } 5797 5798 roundup_size = roundup_pow_of_two(kinfo->rss_size); 5799 roundup_size = ilog2(roundup_size); 5800 /* Set the RSS TC mode according to the new RSS size */ 5801 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 5802 tc_valid[i] = 0; 5803 5804 if (!(hdev->hw_tc_map & BIT(i))) 5805 continue; 5806 5807 tc_valid[i] = 1; 5808 tc_size[i] = roundup_size; 5809 tc_offset[i] = kinfo->rss_size * i; 5810 } 5811 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 5812 if (ret) 5813 return ret; 5814 5815 /* Reinitializes the rss indirect table according to the new RSS size */ 5816 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); 5817 if (!rss_indir) 5818 return -ENOMEM; 5819 5820 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 5821 rss_indir[i] = i % kinfo->rss_size; 5822 5823 ret = hclge_set_rss(handle, rss_indir, NULL, 0); 5824 if (ret) 5825 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 5826 ret); 5827 5828 kfree(rss_indir); 5829 5830 if (!ret) 5831 dev_info(&hdev->pdev->dev, 5832 "Channels changed, rss_size from %d to %d, tqps from %d to %d", 5833 cur_rss_size, kinfo->rss_size, 5834 cur_tqps, kinfo->rss_size * kinfo->num_tc); 5835 5836 return ret; 5837 } 5838 5839 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, 5840 u32 *regs_num_64_bit) 5841 { 5842 struct hclge_desc desc; 5843 u32 total_num; 5844 int ret; 5845 5846 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true); 5847 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5848 if (ret) { 5849 dev_err(&hdev->pdev->dev, 5850 "Query register number cmd failed, ret = %d.\n", ret); 5851 return ret; 5852 } 5853 5854 *regs_num_32_bit = le32_to_cpu(desc.data[0]); 5855 *regs_num_64_bit = le32_to_cpu(desc.data[1]); 5856 5857 total_num = *regs_num_32_bit + 
*regs_num_64_bit; 5858 if (!total_num) 5859 return -EINVAL; 5860 5861 return 0; 5862 } 5863 5864 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, 5865 void *data) 5866 { 5867 #define HCLGE_32_BIT_REG_RTN_DATANUM 8 5868 5869 struct hclge_desc *desc; 5870 u32 *reg_val = data; 5871 __le32 *desc_data; 5872 int cmd_num; 5873 int i, k, n; 5874 int ret; 5875 5876 if (regs_num == 0) 5877 return 0; 5878 5879 cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM); 5880 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); 5881 if (!desc) 5882 return -ENOMEM; 5883 5884 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true); 5885 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); 5886 if (ret) { 5887 dev_err(&hdev->pdev->dev, 5888 "Query 32 bit register cmd failed, ret = %d.\n", ret); 5889 kfree(desc); 5890 return ret; 5891 } 5892 5893 for (i = 0; i < cmd_num; i++) { 5894 if (i == 0) { 5895 desc_data = (__le32 *)(&desc[i].data[0]); 5896 n = HCLGE_32_BIT_REG_RTN_DATANUM - 2; 5897 } else { 5898 desc_data = (__le32 *)(&desc[i]); 5899 n = HCLGE_32_BIT_REG_RTN_DATANUM; 5900 } 5901 for (k = 0; k < n; k++) { 5902 *reg_val++ = le32_to_cpu(*desc_data++); 5903 5904 regs_num--; 5905 if (!regs_num) 5906 break; 5907 } 5908 } 5909 5910 kfree(desc); 5911 return 0; 5912 } 5913 5914 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, 5915 void *data) 5916 { 5917 #define HCLGE_64_BIT_REG_RTN_DATANUM 4 5918 5919 struct hclge_desc *desc; 5920 u64 *reg_val = data; 5921 __le64 *desc_data; 5922 int cmd_num; 5923 int i, k, n; 5924 int ret; 5925 5926 if (regs_num == 0) 5927 return 0; 5928 5929 cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM); 5930 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); 5931 if (!desc) 5932 return -ENOMEM; 5933 5934 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true); 5935 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); 5936 if (ret) { 5937 dev_err(&hdev->pdev->dev, 5938 "Query 64 bit register cmd failed, ret = %d.\n", ret); 5939 kfree(desc); 5940 return ret; 5941 } 5942 5943 for (i = 0; i < cmd_num; i++) { 5944 if (i == 0) { 5945 desc_data = (__le64 *)(&desc[i].data[0]); 5946 n = HCLGE_64_BIT_REG_RTN_DATANUM - 1; 5947 } else { 5948 desc_data = (__le64 *)(&desc[i]); 5949 n = HCLGE_64_BIT_REG_RTN_DATANUM; 5950 } 5951 for (k = 0; k < n; k++) { 5952 *reg_val++ = le64_to_cpu(*desc_data++); 5953 5954 regs_num--; 5955 if (!regs_num) 5956 break; 5957 } 5958 } 5959 5960 kfree(desc); 5961 return 0; 5962 } 5963 5964 static int hclge_get_regs_len(struct hnae3_handle *handle) 5965 { 5966 struct hclge_vport *vport = hclge_get_vport(handle); 5967 struct hclge_dev *hdev = vport->back; 5968 u32 regs_num_32_bit, regs_num_64_bit; 5969 int ret; 5970 5971 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); 5972 if (ret) { 5973 dev_err(&hdev->pdev->dev, 5974 "Get register number failed, ret = %d.\n", ret); 5975 return -EOPNOTSUPP; 5976 } 5977 5978 return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64); 5979 } 5980 5981 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, 5982 void *data) 5983 { 5984 struct hclge_vport *vport = hclge_get_vport(handle); 5985 struct hclge_dev *hdev = vport->back; 5986 u32 regs_num_32_bit, regs_num_64_bit; 5987 int ret; 5988 5989 *version = hdev->fw_version; 5990 5991 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); 5992 if (ret) { 5993 dev_err(&hdev->pdev->dev, 5994 "Get register number failed, ret = %d.\n", 
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	data = (u32 *)data + regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
				    data);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status,
				    u8 act_led_status, u8 link_led_status,
				    u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae_set_field(req->port_speed_led_config, HCLGE_LED_PORT_SPEED_STATE_M,
		       HCLGE_LED_PORT_SPEED_STATE_S, speed_led_status);
	hnae_set_field(req->link_led_config, HCLGE_LED_ACTIVITY_STATE_M,
		       HCLGE_LED_ACTIVITY_STATE_S, act_led_status);
	hnae_set_field(req->activity_led_config, HCLGE_LED_LINK_STATE_M,
		       HCLGE_LED_LINK_STATE_S, link_led_status);
	hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
		       HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
#define BLINK_FREQUENCY		2
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int ret = 0;

	if (phydev || hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		ret = hclge_set_led_status_sfp(hdev,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_ON);
		break;
	case ETHTOOL_ID_INACTIVE:
		ret = hclge_set_led_status_sfp(hdev,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_OFF);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

enum hclge_led_port_speed {
	HCLGE_SPEED_LED_FOR_1G,
	HCLGE_SPEED_LED_FOR_10G,
	HCLGE_SPEED_LED_FOR_25G,
	HCLGE_SPEED_LED_FOR_40G,
	HCLGE_SPEED_LED_FOR_50G,
	HCLGE_SPEED_LED_FOR_100G,
};

static u8 hclge_led_get_speed_status(u32 speed)
{
	u8 speed_led;

	switch (speed) {
	case HCLGE_MAC_SPEED_1G:
		speed_led = HCLGE_SPEED_LED_FOR_1G;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_led = HCLGE_SPEED_LED_FOR_10G;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_led = HCLGE_SPEED_LED_FOR_25G;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_led = HCLGE_SPEED_LED_FOR_40G;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_led = HCLGE_SPEED_LED_FOR_50G;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_led = HCLGE_SPEED_LED_FOR_100G;
		break;
	default:
		speed_led = HCLGE_LED_NO_CHANGE;
	}

	return speed_led;
}
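/* Refresh the SFP port LEDs from the current MAC state: the speed LED
 * follows the negotiated speed, the activity LED is turned on whenever the
 * MAC packet counters have advanced since the last update, and the link LED
 * mirrors link up/down. The locate LED is passed HCLGE_LED_NO_CHANGE so
 * that "ethtool -p" (hclge_set_led_id) keeps control of it.
 */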
static int hclge_update_led_status(struct hclge_dev *hdev)
{
	u8 port_speed_status, link_status, activity_status;
	u64 rx_pkts, tx_pkts;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return 0;

	port_speed_status = hclge_led_get_speed_status(hdev->hw.mac.speed);

	rx_pkts = hdev->hw_stats.mac_stats.mac_rx_total_pkt_num;
	tx_pkts = hdev->hw_stats.mac_stats.mac_tx_total_pkt_num;
	if (rx_pkts != hdev->rx_pkts_for_led ||
	    tx_pkts != hdev->tx_pkts_for_led)
		activity_status = HCLGE_LED_ON;
	else
		activity_status = HCLGE_LED_OFF;
	hdev->rx_pkts_for_led = rx_pkts;
	hdev->tx_pkts_for_led = tx_pkts;

	if (hdev->hw.mac.link)
		link_status = HCLGE_LED_ON;
	else
		link_status = HCLGE_LED_OFF;

	return hclge_set_led_status_sfp(hdev, port_speed_status,
					activity_status, link_status,
					HCLGE_LED_NO_CHANGE);
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static void hclge_get_port_type(struct hnae3_handle *handle,
				u8 *port_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 media_type = hdev->hw.mac.media_type;

	switch (media_type) {
	case HNAE3_MEDIA_TYPE_FIBER:
		*port_type = PORT_FIBRE;
		break;
	case HNAE3_MEDIA_TYPE_COPPER:
		*port_type = PORT_TP;
		break;
	case HNAE3_MEDIA_TYPE_UNKNOWN:
	default:
		*port_type = PORT_OTHER;
		break;
	}
}
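/* Operations exported to the hnae3 framework; the ae_algo registered in
 * hclge_init() below points at this table, and upper-layer clients reach
 * the PF hardware through these callbacks.
 */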
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_flowctrl_adv = hclge_get_flowctrl_adv,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.get_port_type = hclge_get_port_type,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.name = HCLGE_NAME,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);