/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME "hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))

static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac Loopback test",
	"Serdes Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};

static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

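/* The pkt_curr_buf* fields report current packet-buffer occupancy rather
 * than free-running event counts. The 32-bit update path below accumulates
 * readings with '+=', so these fields are cleared first so that they hold
 * the latest snapshot instead of a growing sum.
 */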
static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}

static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

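/* Per-queue counters are read one TQP at a time: desc.data[0] carries the
 * queue index for the RX/TX status query command and desc.data[1] returns
 * the ring packet count, which is accumulated into the tqp's own stats.
 */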
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

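/* Fold the hardware counters into the generic net_device_stats fields:
 * RX drops come from the SSU/PPP drop counters, RX errors aggregate the
 * oversize/undersize/FCS errors and the IGU framing errors, and the
 * multicast count sums both TX and RX multicast packets.
 */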
static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported in all MAC modes (GE/XGE/LGE/CGE)
	 * phy: only supported when a PHY device is present on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		} else {
			count = -EOPNOTSUPP;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

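/* hclge_get_strings() below must emit names in exactly the same order as
 * hclge_get_stats() fills in values: MAC stats, then 32-bit stats, then
 * 64-bit stats, then the per-TQP entries.
 */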
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);

			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
			hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}

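/* The static configuration is spread over two descriptors: desc[0] carries
 * the VMDq/TC/queue-depth fields, PHY address, media type, RX buffer length,
 * the low 32 bits of the MAC address (param[2]) and its high 16 bits plus
 * default speed and RSS size (param[3]); desc[1] carries the NUMA node map
 * and the speed-ability bits.
 */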
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					     HCLGE_CFG_VMDQ_M,
					     HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					   HCLGE_CFG_TQP_DESC_N_M,
					   HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
				       HCLGE_CFG_PHY_ADDR_M,
				       HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_MEDIA_TP_M,
					 HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_RX_BUF_LEN_M,
					 HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_MAC_ADDR_H_M,
					   HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_DEFAULT_SPEED_M,
					    HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_RSS_SIZE_M,
					   HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_M,
					    HCLGE_CFG_SPEED_ABILITY_S);
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled out
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Length is in units of 4 bytes when sent to hardware */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);
	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}

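/* The per-vport queue layout is derived from the enabled TC map: rss_size
 * is the number of queues per TC (bounded by rss_size_max), and
 * num_tqps = rss_size * num_tc, so every enabled TC gets an equal,
 * contiguous slice of the vport's queues.
 */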
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
		return -EINVAL;
	}

	return 0;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

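/* One vport is allocated for the PF itself plus one per VMDq instance and
 * one per requested VF. The available TQPs are split evenly across vports,
 * with the main (PF) vport also taking any remainder.
 */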
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is allocated in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"tx buffer alloc failed %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

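/* Check whether rx_all (the packet buffer left after TX allocation) can hold
 * the private buffers plus a shared buffer sized for the worst case; if so,
 * record the shared buffer size and derive its per-TC thresholds from
 * hdev->mps.
 */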
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}
	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
		return ret;
	}
	return 0;
}

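/* Like the per-TC waterlines above, the shared-buffer thresholds are
 * programmed two descriptors at a time, HCLGE_TC_NUM_ONE_DESC TCs per
 * descriptor, with the enable bit set whenever the threshold is non-zero.
 */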
false); 1900 req = (struct hclge_rx_com_thrd *)&desc[i].data; 1901 1902 /* The first descriptor set the NEXT bit to 1 */ 1903 if (i == 0) 1904 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1905 else 1906 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1907 1908 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 1909 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; 1910 1911 req->com_thrd[j].high = 1912 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); 1913 req->com_thrd[j].high |= 1914 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) << 1915 HCLGE_RX_PRIV_EN_B); 1916 req->com_thrd[j].low = 1917 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); 1918 req->com_thrd[j].low |= 1919 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) << 1920 HCLGE_RX_PRIV_EN_B); 1921 } 1922 } 1923 1924 /* Send 2 descriptors at one time */ 1925 ret = hclge_cmd_send(&hdev->hw, desc, 2); 1926 if (ret) { 1927 dev_err(&hdev->pdev->dev, 1928 "common threshold config cmd failed %d\n", ret); 1929 return ret; 1930 } 1931 return 0; 1932 } 1933 1934 static int hclge_common_wl_config(struct hclge_dev *hdev, 1935 struct hclge_pkt_buf_alloc *buf_alloc) 1936 { 1937 struct hclge_shared_buf *buf = &buf_alloc->s_buf; 1938 struct hclge_rx_com_wl *req; 1939 struct hclge_desc desc; 1940 int ret; 1941 1942 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); 1943 1944 req = (struct hclge_rx_com_wl *)desc.data; 1945 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); 1946 req->com_wl.high |= 1947 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) << 1948 HCLGE_RX_PRIV_EN_B); 1949 1950 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); 1951 req->com_wl.low |= 1952 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) << 1953 HCLGE_RX_PRIV_EN_B); 1954 1955 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1956 if (ret) { 1957 dev_err(&hdev->pdev->dev, 1958 "common waterline config cmd failed %d\n", ret); 1959 return ret; 1960 } 1961 1962 return 0; 1963 } 1964 1965 int hclge_buffer_alloc(struct hclge_dev *hdev) 1966 { 1967 struct hclge_pkt_buf_alloc *pkt_buf; 1968 int ret; 1969 1970 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); 1971 if (!pkt_buf) 1972 return -ENOMEM; 1973 1974 ret = hclge_tx_buffer_calc(hdev, pkt_buf); 1975 if (ret) { 1976 dev_err(&hdev->pdev->dev, 1977 "could not calc tx buffer size for all TCs %d\n", ret); 1978 goto out; 1979 } 1980 1981 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); 1982 if (ret) { 1983 dev_err(&hdev->pdev->dev, 1984 "could not alloc tx buffers %d\n", ret); 1985 goto out; 1986 } 1987 1988 ret = hclge_rx_buffer_calc(hdev, pkt_buf); 1989 if (ret) { 1990 dev_err(&hdev->pdev->dev, 1991 "could not calc rx priv buffer size for all TCs %d\n", 1992 ret); 1993 goto out; 1994 } 1995 1996 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); 1997 if (ret) { 1998 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", 1999 ret); 2000 goto out; 2001 } 2002 2003 if (hnae3_dev_dcb_supported(hdev)) { 2004 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); 2005 if (ret) { 2006 dev_err(&hdev->pdev->dev, 2007 "could not configure rx private waterline %d\n", 2008 ret); 2009 goto out; 2010 } 2011 2012 ret = hclge_common_thrd_config(hdev, pkt_buf); 2013 if (ret) { 2014 dev_err(&hdev->pdev->dev, 2015 "could not configure common threshold %d\n", 2016 ret); 2017 goto out; 2018 } 2019 } 2020 2021 ret = hclge_common_wl_config(hdev, pkt_buf); 2022 if (ret) 2023 dev_err(&hdev->pdev->dev, 2024 "could not configure common waterline %d\n", ret); 2025 2026 out: 2027 kfree(pkt_buf); 2028 return ret; 2029 } 2030 2031 static int 
hclge_init_roce_base_info(struct hclge_vport *vport) 2032 { 2033 struct hnae3_handle *roce = &vport->roce; 2034 struct hnae3_handle *nic = &vport->nic; 2035 2036 roce->rinfo.num_vectors = vport->back->num_roce_msi; 2037 2038 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || 2039 vport->back->num_msi_left == 0) 2040 return -EINVAL; 2041 2042 roce->rinfo.base_vector = vport->back->roce_base_vector; 2043 2044 roce->rinfo.netdev = nic->kinfo.netdev; 2045 roce->rinfo.roce_io_base = vport->back->hw.io_base; 2046 2047 roce->pdev = nic->pdev; 2048 roce->ae_algo = nic->ae_algo; 2049 roce->numa_node_mask = nic->numa_node_mask; 2050 2051 return 0; 2052 } 2053 2054 static int hclge_init_msi(struct hclge_dev *hdev) 2055 { 2056 struct pci_dev *pdev = hdev->pdev; 2057 int vectors; 2058 int i; 2059 2060 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 2061 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2062 if (vectors < 0) { 2063 dev_err(&pdev->dev, 2064 "failed(%d) to allocate MSI/MSI-X vectors\n", 2065 vectors); 2066 return vectors; 2067 } 2068 if (vectors < hdev->num_msi) 2069 dev_warn(&hdev->pdev->dev, 2070 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2071 hdev->num_msi, vectors); 2072 2073 hdev->num_msi = vectors; 2074 hdev->num_msi_left = vectors; 2075 hdev->base_msi_vector = pdev->irq; 2076 hdev->roce_base_vector = hdev->base_msi_vector + 2077 HCLGE_ROCE_VECTOR_OFFSET; 2078 2079 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2080 sizeof(u16), GFP_KERNEL); 2081 if (!hdev->vector_status) { 2082 pci_free_irq_vectors(pdev); 2083 return -ENOMEM; 2084 } 2085 2086 for (i = 0; i < hdev->num_msi; i++) 2087 hdev->vector_status[i] = HCLGE_INVALID_VPORT; 2088 2089 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2090 sizeof(int), GFP_KERNEL); 2091 if (!hdev->vector_irq) { 2092 pci_free_irq_vectors(pdev); 2093 return -ENOMEM; 2094 } 2095 2096 return 0; 2097 } 2098 2099 static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) 2100 { 2101 struct hclge_mac *mac = &hdev->hw.mac; 2102 2103 if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M)) 2104 mac->duplex = (u8)duplex; 2105 else 2106 mac->duplex = HCLGE_MAC_FULL; 2107 2108 mac->speed = speed; 2109 } 2110 2111 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) 2112 { 2113 struct hclge_config_mac_speed_dup_cmd *req; 2114 struct hclge_desc desc; 2115 int ret; 2116 2117 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; 2118 2119 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); 2120 2121 hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); 2122 2123 switch (speed) { 2124 case HCLGE_MAC_SPEED_10M: 2125 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2126 HCLGE_CFG_SPEED_S, 6); 2127 break; 2128 case HCLGE_MAC_SPEED_100M: 2129 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2130 HCLGE_CFG_SPEED_S, 7); 2131 break; 2132 case HCLGE_MAC_SPEED_1G: 2133 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2134 HCLGE_CFG_SPEED_S, 0); 2135 break; 2136 case HCLGE_MAC_SPEED_10G: 2137 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2138 HCLGE_CFG_SPEED_S, 1); 2139 break; 2140 case HCLGE_MAC_SPEED_25G: 2141 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2142 HCLGE_CFG_SPEED_S, 2); 2143 break; 2144 case HCLGE_MAC_SPEED_40G: 2145 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2146 HCLGE_CFG_SPEED_S, 3); 2147 break; 2148 case HCLGE_MAC_SPEED_50G: 2149 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2150 HCLGE_CFG_SPEED_S, 4); 2151 
break; 2152 case HCLGE_MAC_SPEED_100G: 2153 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2154 HCLGE_CFG_SPEED_S, 5); 2155 break; 2156 default: 2157 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); 2158 return -EINVAL; 2159 } 2160 2161 hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, 2162 1); 2163 2164 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2165 if (ret) { 2166 dev_err(&hdev->pdev->dev, 2167 "mac speed/duplex config cmd failed %d.\n", ret); 2168 return ret; 2169 } 2170 2171 hclge_check_speed_dup(hdev, duplex, speed); 2172 2173 return 0; 2174 } 2175 2176 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, 2177 u8 duplex) 2178 { 2179 struct hclge_vport *vport = hclge_get_vport(handle); 2180 struct hclge_dev *hdev = vport->back; 2181 2182 return hclge_cfg_mac_speed_dup(hdev, speed, duplex); 2183 } 2184 2185 static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, 2186 u8 *duplex) 2187 { 2188 struct hclge_query_an_speed_dup_cmd *req; 2189 struct hclge_desc desc; 2190 int speed_tmp; 2191 int ret; 2192 2193 req = (struct hclge_query_an_speed_dup_cmd *)desc.data; 2194 2195 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); 2196 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2197 if (ret) { 2198 dev_err(&hdev->pdev->dev, 2199 "mac speed/autoneg/duplex query cmd failed %d\n", 2200 ret); 2201 return ret; 2202 } 2203 2204 *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); 2205 speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, 2206 HCLGE_QUERY_SPEED_S); 2207 2208 ret = hclge_parse_speed(speed_tmp, speed); 2209 if (ret) { 2210 dev_err(&hdev->pdev->dev, 2211 "could not parse speed(=%d), %d\n", speed_tmp, ret); 2212 return -EIO; 2213 } 2214 2215 return 0; 2216 } 2217 2218 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) 2219 { 2220 struct hclge_config_auto_neg_cmd *req; 2221 struct hclge_desc desc; 2222 u32 flag = 0; 2223 int ret; 2224 2225 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); 2226 2227 req = (struct hclge_config_auto_neg_cmd *)desc.data; 2228 hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); 2229 req->cfg_an_cmd_flag = cpu_to_le32(flag); 2230 2231 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2232 if (ret) { 2233 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", 2234 ret); 2235 return ret; 2236 } 2237 2238 return 0; 2239 } 2240 2241 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) 2242 { 2243 struct hclge_vport *vport = hclge_get_vport(handle); 2244 struct hclge_dev *hdev = vport->back; 2245 2246 return hclge_set_autoneg_en(hdev, enable); 2247 } 2248 2249 static int hclge_get_autoneg(struct hnae3_handle *handle) 2250 { 2251 struct hclge_vport *vport = hclge_get_vport(handle); 2252 struct hclge_dev *hdev = vport->back; 2253 struct phy_device *phydev = hdev->hw.mac.phydev; 2254 2255 if (phydev) 2256 return phydev->autoneg; 2257 2258 return hdev->hw.mac.autoneg; 2259 } 2260 2261 static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, 2262 bool mask_vlan, 2263 u8 *mac_mask) 2264 { 2265 struct hclge_mac_vlan_mask_entry_cmd *req; 2266 struct hclge_desc desc; 2267 int status; 2268 2269 req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; 2270 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); 2271 2272 hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, 2273 mask_vlan ? 
1 : 0); 2274 ether_addr_copy(req->mac_mask, mac_mask); 2275 2276 status = hclge_cmd_send(&hdev->hw, &desc, 1); 2277 if (status) 2278 dev_err(&hdev->pdev->dev, 2279 "Config mac_vlan_mask failed for cmd_send, ret =%d\n", 2280 status); 2281 2282 return status; 2283 } 2284 2285 static int hclge_mac_init(struct hclge_dev *hdev) 2286 { 2287 struct hnae3_handle *handle = &hdev->vport[0].nic; 2288 struct net_device *netdev = handle->kinfo.netdev; 2289 struct hclge_mac *mac = &hdev->hw.mac; 2290 u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 2291 struct hclge_vport *vport; 2292 int mtu; 2293 int ret; 2294 int i; 2295 2296 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); 2297 if (ret) { 2298 dev_err(&hdev->pdev->dev, 2299 "Config mac speed dup fail ret=%d\n", ret); 2300 return ret; 2301 } 2302 2303 mac->link = 0; 2304 2305 /* Initialize the MTA table work mode */ 2306 hdev->enable_mta = true; 2307 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; 2308 2309 ret = hclge_set_mta_filter_mode(hdev, 2310 hdev->mta_mac_sel_type, 2311 hdev->enable_mta); 2312 if (ret) { 2313 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n", 2314 ret); 2315 return ret; 2316 } 2317 2318 for (i = 0; i < hdev->num_alloc_vport; i++) { 2319 vport = &hdev->vport[i]; 2320 vport->accept_mta_mc = false; 2321 2322 memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow)); 2323 ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false); 2324 if (ret) { 2325 dev_err(&hdev->pdev->dev, 2326 "set mta filter mode fail ret=%d\n", ret); 2327 return ret; 2328 } 2329 } 2330 2331 ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); 2332 if (ret) { 2333 dev_err(&hdev->pdev->dev, 2334 "set default mac_vlan_mask fail ret=%d\n", ret); 2335 return ret; 2336 } 2337 2338 if (netdev) 2339 mtu = netdev->mtu; 2340 else 2341 mtu = ETH_DATA_LEN; 2342 2343 ret = hclge_set_mtu(handle, mtu); 2344 if (ret) { 2345 dev_err(&hdev->pdev->dev, 2346 "set mtu failed ret=%d\n", ret); 2347 return ret; 2348 } 2349 2350 return 0; 2351 } 2352 2353 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) 2354 { 2355 if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2356 schedule_work(&hdev->mbx_service_task); 2357 } 2358 2359 static void hclge_reset_task_schedule(struct hclge_dev *hdev) 2360 { 2361 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) 2362 schedule_work(&hdev->rst_service_task); 2363 } 2364 2365 static void hclge_task_schedule(struct hclge_dev *hdev) 2366 { 2367 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && 2368 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2369 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) 2370 (void)schedule_work(&hdev->service_task); 2371 } 2372 2373 static int hclge_get_mac_link_status(struct hclge_dev *hdev) 2374 { 2375 struct hclge_link_status_cmd *req; 2376 struct hclge_desc desc; 2377 int link_status; 2378 int ret; 2379 2380 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); 2381 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2382 if (ret) { 2383 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", 2384 ret); 2385 return ret; 2386 } 2387 2388 req = (struct hclge_link_status_cmd *)desc.data; 2389 link_status = req->status & HCLGE_LINK_STATUS; 2390 2391 return !!link_status; 2392 } 2393 2394 static int hclge_get_mac_phy_link(struct hclge_dev *hdev) 2395 { 2396 int mac_state; 2397 int link_stat; 2398 2399 mac_state = hclge_get_mac_link_status(hdev); 2400 2401 if (hdev->hw.mac.phydev) { 2402 if 
(!genphy_read_status(hdev->hw.mac.phydev)) 2403 link_stat = mac_state & 2404 hdev->hw.mac.phydev->link; 2405 else 2406 link_stat = 0; 2407 2408 } else { 2409 link_stat = mac_state; 2410 } 2411 2412 return !!link_stat; 2413 } 2414 2415 static void hclge_update_link_status(struct hclge_dev *hdev) 2416 { 2417 struct hnae3_client *client = hdev->nic_client; 2418 struct hnae3_handle *handle; 2419 int state; 2420 int i; 2421 2422 if (!client) 2423 return; 2424 state = hclge_get_mac_phy_link(hdev); 2425 if (state != hdev->hw.mac.link) { 2426 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2427 handle = &hdev->vport[i].nic; 2428 client->ops->link_status_change(handle, state); 2429 } 2430 hdev->hw.mac.link = state; 2431 } 2432 } 2433 2434 static int hclge_update_speed_duplex(struct hclge_dev *hdev) 2435 { 2436 struct hclge_mac mac = hdev->hw.mac; 2437 u8 duplex; 2438 int speed; 2439 int ret; 2440 2441 /* get the speed and duplex as the autoneg result from the mac cmd when phy 2442 * doesn't exist. 2443 */ 2444 if (mac.phydev || !mac.autoneg) 2445 return 0; 2446 2447 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); 2448 if (ret) { 2449 dev_err(&hdev->pdev->dev, 2450 "mac autoneg/speed/duplex query failed %d\n", ret); 2451 return ret; 2452 } 2453 2454 if ((mac.speed != speed) || (mac.duplex != duplex)) { 2455 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); 2456 if (ret) { 2457 dev_err(&hdev->pdev->dev, 2458 "mac speed/duplex config failed %d\n", ret); 2459 return ret; 2460 } 2461 } 2462 2463 return 0; 2464 } 2465 2466 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) 2467 { 2468 struct hclge_vport *vport = hclge_get_vport(handle); 2469 struct hclge_dev *hdev = vport->back; 2470 2471 return hclge_update_speed_duplex(hdev); 2472 } 2473 2474 static int hclge_get_status(struct hnae3_handle *handle) 2475 { 2476 struct hclge_vport *vport = hclge_get_vport(handle); 2477 struct hclge_dev *hdev = vport->back; 2478 2479 hclge_update_link_status(hdev); 2480 2481 return hdev->hw.mac.link; 2482 } 2483 2484 static void hclge_service_timer(struct timer_list *t) 2485 { 2486 struct hclge_dev *hdev = from_timer(hdev, t, service_timer); 2487 2488 mod_timer(&hdev->service_timer, jiffies + HZ); 2489 hdev->hw_stats.stats_timer++; 2490 hclge_task_schedule(hdev); 2491 } 2492 2493 static void hclge_service_complete(struct hclge_dev *hdev) 2494 { 2495 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); 2496 2497 /* Flush memory before next watchdog */ 2498 smp_mb__before_atomic(); 2499 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); 2500 } 2501 2502 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) 2503 { 2504 u32 rst_src_reg; 2505 u32 cmdq_src_reg; 2506 2507 /* fetch the events from their corresponding regs */ 2508 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG); 2509 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); 2510 2511 /* Assumption: If by any chance reset and mailbox events are reported 2512 * together then we will only process the reset event in this go and will 2513 * defer the processing of the mailbox events. Since we would not have 2514 * cleared the RX CMDQ event this time, we would receive another 2515 * interrupt from H/W just for the mailbox.
2516 */ 2517 2518 /* check for vector0 reset event sources */ 2519 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { 2520 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); 2521 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 2522 return HCLGE_VECTOR0_EVENT_RST; 2523 } 2524 2525 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { 2526 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); 2527 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); 2528 return HCLGE_VECTOR0_EVENT_RST; 2529 } 2530 2531 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { 2532 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); 2533 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 2534 return HCLGE_VECTOR0_EVENT_RST; 2535 } 2536 2537 /* check for vector0 mailbox(=CMDQ RX) event source */ 2538 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 2539 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); 2540 *clearval = cmdq_src_reg; 2541 return HCLGE_VECTOR0_EVENT_MBX; 2542 } 2543 2544 return HCLGE_VECTOR0_EVENT_OTHER; 2545 } 2546 2547 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, 2548 u32 regclr) 2549 { 2550 switch (event_type) { 2551 case HCLGE_VECTOR0_EVENT_RST: 2552 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); 2553 break; 2554 case HCLGE_VECTOR0_EVENT_MBX: 2555 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); 2556 break; 2557 } 2558 } 2559 2560 static void hclge_clear_all_event_cause(struct hclge_dev *hdev) 2561 { 2562 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, 2563 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) | 2564 BIT(HCLGE_VECTOR0_CORERESET_INT_B) | 2565 BIT(HCLGE_VECTOR0_IMPRESET_INT_B)); 2566 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); 2567 } 2568 2569 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) 2570 { 2571 writel(enable ? 1 : 0, vector->addr); 2572 } 2573 2574 static irqreturn_t hclge_misc_irq_handle(int irq, void *data) 2575 { 2576 struct hclge_dev *hdev = data; 2577 u32 event_cause; 2578 u32 clearval; 2579 2580 hclge_enable_vector(&hdev->misc_vector, false); 2581 event_cause = hclge_check_event_cause(hdev, &clearval); 2582 2583 /* vector 0 interrupt is shared with reset and mailbox source events.*/ 2584 switch (event_cause) { 2585 case HCLGE_VECTOR0_EVENT_RST: 2586 hclge_reset_task_schedule(hdev); 2587 break; 2588 case HCLGE_VECTOR0_EVENT_MBX: 2589 /* If we are here then, 2590 * 1. Either we are not handling any mbx task and we are not 2591 * scheduled as well 2592 * OR 2593 * 2. We could be handling a mbx task but nothing more is 2594 * scheduled. 2595 * In both cases, we should schedule mbx task as there are more 2596 * mbx messages reported by this interrupt. 
2597 */ 2598 hclge_mbx_task_schedule(hdev); 2599 break; 2600 default: 2601 dev_warn(&hdev->pdev->dev, 2602 "received unknown or unhandled event of vector0\n"); 2603 break; 2604 } 2605 2606 /* clear the source of interrupt if it is not cause by reset */ 2607 if (event_cause != HCLGE_VECTOR0_EVENT_RST) { 2608 hclge_clear_event_cause(hdev, event_cause, clearval); 2609 hclge_enable_vector(&hdev->misc_vector, true); 2610 } 2611 2612 return IRQ_HANDLED; 2613 } 2614 2615 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) 2616 { 2617 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; 2618 hdev->num_msi_left += 1; 2619 hdev->num_msi_used -= 1; 2620 } 2621 2622 static void hclge_get_misc_vector(struct hclge_dev *hdev) 2623 { 2624 struct hclge_misc_vector *vector = &hdev->misc_vector; 2625 2626 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); 2627 2628 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; 2629 hdev->vector_status[0] = 0; 2630 2631 hdev->num_msi_left -= 1; 2632 hdev->num_msi_used += 1; 2633 } 2634 2635 static int hclge_misc_irq_init(struct hclge_dev *hdev) 2636 { 2637 int ret; 2638 2639 hclge_get_misc_vector(hdev); 2640 2641 /* this would be explicitly freed in the end */ 2642 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, 2643 0, "hclge_misc", hdev); 2644 if (ret) { 2645 hclge_free_vector(hdev, 0); 2646 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", 2647 hdev->misc_vector.vector_irq); 2648 } 2649 2650 return ret; 2651 } 2652 2653 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) 2654 { 2655 free_irq(hdev->misc_vector.vector_irq, hdev); 2656 hclge_free_vector(hdev, 0); 2657 } 2658 2659 static int hclge_notify_client(struct hclge_dev *hdev, 2660 enum hnae3_reset_notify_type type) 2661 { 2662 struct hnae3_client *client = hdev->nic_client; 2663 u16 i; 2664 2665 if (!client->ops->reset_notify) 2666 return -EOPNOTSUPP; 2667 2668 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2669 struct hnae3_handle *handle = &hdev->vport[i].nic; 2670 int ret; 2671 2672 ret = client->ops->reset_notify(handle, type); 2673 if (ret) 2674 return ret; 2675 } 2676 2677 return 0; 2678 } 2679 2680 static int hclge_reset_wait(struct hclge_dev *hdev) 2681 { 2682 #define HCLGE_RESET_WATI_MS 100 2683 #define HCLGE_RESET_WAIT_CNT 5 2684 u32 val, reg, reg_bit; 2685 u32 cnt = 0; 2686 2687 switch (hdev->reset_type) { 2688 case HNAE3_GLOBAL_RESET: 2689 reg = HCLGE_GLOBAL_RESET_REG; 2690 reg_bit = HCLGE_GLOBAL_RESET_BIT; 2691 break; 2692 case HNAE3_CORE_RESET: 2693 reg = HCLGE_GLOBAL_RESET_REG; 2694 reg_bit = HCLGE_CORE_RESET_BIT; 2695 break; 2696 case HNAE3_FUNC_RESET: 2697 reg = HCLGE_FUN_RST_ING; 2698 reg_bit = HCLGE_FUN_RST_ING_B; 2699 break; 2700 default: 2701 dev_err(&hdev->pdev->dev, 2702 "Wait for unsupported reset type: %d\n", 2703 hdev->reset_type); 2704 return -EINVAL; 2705 } 2706 2707 val = hclge_read_dev(&hdev->hw, reg); 2708 while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { 2709 msleep(HCLGE_RESET_WATI_MS); 2710 val = hclge_read_dev(&hdev->hw, reg); 2711 cnt++; 2712 } 2713 2714 if (cnt >= HCLGE_RESET_WAIT_CNT) { 2715 dev_warn(&hdev->pdev->dev, 2716 "Wait for reset timeout: %d\n", hdev->reset_type); 2717 return -EBUSY; 2718 } 2719 2720 return 0; 2721 } 2722 2723 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) 2724 { 2725 struct hclge_desc desc; 2726 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; 2727 int ret; 2728 2729 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, 
false); 2730 hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0); 2731 hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); 2732 req->fun_reset_vfid = func_id; 2733 2734 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2735 if (ret) 2736 dev_err(&hdev->pdev->dev, 2737 "send function reset cmd fail, status =%d\n", ret); 2738 2739 return ret; 2740 } 2741 2742 static void hclge_do_reset(struct hclge_dev *hdev) 2743 { 2744 struct pci_dev *pdev = hdev->pdev; 2745 u32 val; 2746 2747 switch (hdev->reset_type) { 2748 case HNAE3_GLOBAL_RESET: 2749 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 2750 hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); 2751 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 2752 dev_info(&pdev->dev, "Global Reset requested\n"); 2753 break; 2754 case HNAE3_CORE_RESET: 2755 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 2756 hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1); 2757 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 2758 dev_info(&pdev->dev, "Core Reset requested\n"); 2759 break; 2760 case HNAE3_FUNC_RESET: 2761 dev_info(&pdev->dev, "PF Reset requested\n"); 2762 hclge_func_reset_cmd(hdev, 0); 2763 /* schedule again to check later */ 2764 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); 2765 hclge_reset_task_schedule(hdev); 2766 break; 2767 default: 2768 dev_warn(&pdev->dev, 2769 "Unsupported reset type: %d\n", hdev->reset_type); 2770 break; 2771 } 2772 } 2773 2774 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, 2775 unsigned long *addr) 2776 { 2777 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 2778 2779 /* return the highest priority reset level amongst all */ 2780 if (test_bit(HNAE3_GLOBAL_RESET, addr)) 2781 rst_level = HNAE3_GLOBAL_RESET; 2782 else if (test_bit(HNAE3_CORE_RESET, addr)) 2783 rst_level = HNAE3_CORE_RESET; 2784 else if (test_bit(HNAE3_IMP_RESET, addr)) 2785 rst_level = HNAE3_IMP_RESET; 2786 else if (test_bit(HNAE3_FUNC_RESET, addr)) 2787 rst_level = HNAE3_FUNC_RESET; 2788 2789 /* now, clear all other resets */ 2790 clear_bit(HNAE3_GLOBAL_RESET, addr); 2791 clear_bit(HNAE3_CORE_RESET, addr); 2792 clear_bit(HNAE3_IMP_RESET, addr); 2793 clear_bit(HNAE3_FUNC_RESET, addr); 2794 2795 return rst_level; 2796 } 2797 2798 static void hclge_clear_reset_cause(struct hclge_dev *hdev) 2799 { 2800 u32 clearval = 0; 2801 2802 switch (hdev->reset_type) { 2803 case HNAE3_IMP_RESET: 2804 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 2805 break; 2806 case HNAE3_GLOBAL_RESET: 2807 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 2808 break; 2809 case HNAE3_CORE_RESET: 2810 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); 2811 break; 2812 default: 2813 dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d", 2814 hdev->reset_type); 2815 break; 2816 } 2817 2818 if (!clearval) 2819 return; 2820 2821 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval); 2822 hclge_enable_vector(&hdev->misc_vector, true); 2823 } 2824 2825 static void hclge_reset(struct hclge_dev *hdev) 2826 { 2827 /* perform reset of the stack & ae device for a client */ 2828 2829 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 2830 2831 if (!hclge_reset_wait(hdev)) { 2832 rtnl_lock(); 2833 hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); 2834 hclge_reset_ae_dev(hdev->ae_dev); 2835 hclge_notify_client(hdev, HNAE3_INIT_CLIENT); 2836 rtnl_unlock(); 2837 2838 hclge_clear_reset_cause(hdev); 2839 } else { 2840 /* schedule again to check pending resets later */ 2841 set_bit(hdev->reset_type, &hdev->reset_pending); 2842 
hclge_reset_task_schedule(hdev); 2843 } 2844 2845 hclge_notify_client(hdev, HNAE3_UP_CLIENT); 2846 } 2847 2848 static void hclge_reset_event(struct hnae3_handle *handle) 2849 { 2850 struct hclge_vport *vport = hclge_get_vport(handle); 2851 struct hclge_dev *hdev = vport->back; 2852 2853 /* check if this is a new reset request and we are not here just because 2854 * last reset attempt did not succeed and watchdog hit us again. We will 2855 * know this if last reset request did not occur very recently (watchdog 2856 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) 2857 * In case of new request we reset the "reset level" to PF reset. 2858 */ 2859 if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) 2860 handle->reset_level = HNAE3_FUNC_RESET; 2861 2862 dev_info(&hdev->pdev->dev, "received reset event , reset type is %d", 2863 handle->reset_level); 2864 2865 /* request reset & schedule reset task */ 2866 set_bit(handle->reset_level, &hdev->reset_request); 2867 hclge_reset_task_schedule(hdev); 2868 2869 if (handle->reset_level < HNAE3_GLOBAL_RESET) 2870 handle->reset_level++; 2871 2872 handle->last_reset_time = jiffies; 2873 } 2874 2875 static void hclge_reset_subtask(struct hclge_dev *hdev) 2876 { 2877 /* check if there is any ongoing reset in the hardware. This status can 2878 * be checked from reset_pending. If there is then, we need to wait for 2879 * hardware to complete reset. 2880 * a. If we are able to figure out in reasonable time that hardware 2881 * has fully resetted then, we can proceed with driver, client 2882 * reset. 2883 * b. else, we can come back later to check this status so re-sched 2884 * now. 2885 */ 2886 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); 2887 if (hdev->reset_type != HNAE3_NONE_RESET) 2888 hclge_reset(hdev); 2889 2890 /* check if we got any *new* reset requests to be honored */ 2891 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request); 2892 if (hdev->reset_type != HNAE3_NONE_RESET) 2893 hclge_do_reset(hdev); 2894 2895 hdev->reset_type = HNAE3_NONE_RESET; 2896 } 2897 2898 static void hclge_reset_service_task(struct work_struct *work) 2899 { 2900 struct hclge_dev *hdev = 2901 container_of(work, struct hclge_dev, rst_service_task); 2902 2903 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 2904 return; 2905 2906 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 2907 2908 hclge_reset_subtask(hdev); 2909 2910 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 2911 } 2912 2913 static void hclge_mailbox_service_task(struct work_struct *work) 2914 { 2915 struct hclge_dev *hdev = 2916 container_of(work, struct hclge_dev, mbx_service_task); 2917 2918 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) 2919 return; 2920 2921 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 2922 2923 hclge_mbx_handler(hdev); 2924 2925 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 2926 } 2927 2928 static void hclge_service_task(struct work_struct *work) 2929 { 2930 struct hclge_dev *hdev = 2931 container_of(work, struct hclge_dev, service_task); 2932 2933 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { 2934 hclge_update_stats_for_all(hdev); 2935 hdev->hw_stats.stats_timer = 0; 2936 } 2937 2938 hclge_update_speed_duplex(hdev); 2939 hclge_update_link_status(hdev); 2940 hclge_service_complete(hdev); 2941 } 2942 2943 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) 2944 { 2945 /* VF handle has no client */ 2946 if (!handle->client) 2947 return 
container_of(handle, struct hclge_vport, nic); 2948 else if (handle->client->type == HNAE3_CLIENT_ROCE) 2949 return container_of(handle, struct hclge_vport, roce); 2950 else 2951 return container_of(handle, struct hclge_vport, nic); 2952 } 2953 2954 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, 2955 struct hnae3_vector_info *vector_info) 2956 { 2957 struct hclge_vport *vport = hclge_get_vport(handle); 2958 struct hnae3_vector_info *vector = vector_info; 2959 struct hclge_dev *hdev = vport->back; 2960 int alloc = 0; 2961 int i, j; 2962 2963 vector_num = min(hdev->num_msi_left, vector_num); 2964 2965 for (j = 0; j < vector_num; j++) { 2966 for (i = 1; i < hdev->num_msi; i++) { 2967 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { 2968 vector->vector = pci_irq_vector(hdev->pdev, i); 2969 vector->io_addr = hdev->hw.io_base + 2970 HCLGE_VECTOR_REG_BASE + 2971 (i - 1) * HCLGE_VECTOR_REG_OFFSET + 2972 vport->vport_id * 2973 HCLGE_VECTOR_VF_OFFSET; 2974 hdev->vector_status[i] = vport->vport_id; 2975 hdev->vector_irq[i] = vector->vector; 2976 2977 vector++; 2978 alloc++; 2979 2980 break; 2981 } 2982 } 2983 } 2984 hdev->num_msi_left -= alloc; 2985 hdev->num_msi_used += alloc; 2986 2987 return alloc; 2988 } 2989 2990 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) 2991 { 2992 int i; 2993 2994 for (i = 0; i < hdev->num_msi; i++) 2995 if (vector == hdev->vector_irq[i]) 2996 return i; 2997 2998 return -EINVAL; 2999 } 3000 3001 static int hclge_put_vector(struct hnae3_handle *handle, int vector) 3002 { 3003 struct hclge_vport *vport = hclge_get_vport(handle); 3004 struct hclge_dev *hdev = vport->back; 3005 int vector_id; 3006 3007 vector_id = hclge_get_vector_index(hdev, vector); 3008 if (vector_id < 0) { 3009 dev_err(&hdev->pdev->dev, 3010 "Get vector index fail. 
vector_id =%d\n", vector_id); 3011 return vector_id; 3012 } 3013 3014 hclge_free_vector(hdev, vector_id); 3015 3016 return 0; 3017 } 3018 3019 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) 3020 { 3021 return HCLGE_RSS_KEY_SIZE; 3022 } 3023 3024 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) 3025 { 3026 return HCLGE_RSS_IND_TBL_SIZE; 3027 } 3028 3029 static int hclge_set_rss_algo_key(struct hclge_dev *hdev, 3030 const u8 hfunc, const u8 *key) 3031 { 3032 struct hclge_rss_config_cmd *req; 3033 struct hclge_desc desc; 3034 int key_offset; 3035 int key_size; 3036 int ret; 3037 3038 req = (struct hclge_rss_config_cmd *)desc.data; 3039 3040 for (key_offset = 0; key_offset < 3; key_offset++) { 3041 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, 3042 false); 3043 3044 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); 3045 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); 3046 3047 if (key_offset == 2) 3048 key_size = 3049 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; 3050 else 3051 key_size = HCLGE_RSS_HASH_KEY_NUM; 3052 3053 memcpy(req->hash_key, 3054 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); 3055 3056 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3057 if (ret) { 3058 dev_err(&hdev->pdev->dev, 3059 "Configure RSS config fail, status = %d\n", 3060 ret); 3061 return ret; 3062 } 3063 } 3064 return 0; 3065 } 3066 3067 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) 3068 { 3069 struct hclge_rss_indirection_table_cmd *req; 3070 struct hclge_desc desc; 3071 int i, j; 3072 int ret; 3073 3074 req = (struct hclge_rss_indirection_table_cmd *)desc.data; 3075 3076 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { 3077 hclge_cmd_setup_basic_desc 3078 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); 3079 3080 req->start_table_index = 3081 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); 3082 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); 3083 3084 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) 3085 req->rss_result[j] = 3086 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; 3087 3088 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3089 if (ret) { 3090 dev_err(&hdev->pdev->dev, 3091 "Configure rss indir table fail,status = %d\n", 3092 ret); 3093 return ret; 3094 } 3095 } 3096 return 0; 3097 } 3098 3099 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, 3100 u16 *tc_size, u16 *tc_offset) 3101 { 3102 struct hclge_rss_tc_mode_cmd *req; 3103 struct hclge_desc desc; 3104 int ret; 3105 int i; 3106 3107 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); 3108 req = (struct hclge_rss_tc_mode_cmd *)desc.data; 3109 3110 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 3111 u16 mode = 0; 3112 3113 hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); 3114 hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M, 3115 HCLGE_RSS_TC_SIZE_S, tc_size[i]); 3116 hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M, 3117 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); 3118 3119 req->rss_tc_mode[i] = cpu_to_le16(mode); 3120 } 3121 3122 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3123 if (ret) { 3124 dev_err(&hdev->pdev->dev, 3125 "Configure rss tc mode fail, status = %d\n", ret); 3126 return ret; 3127 } 3128 3129 return 0; 3130 } 3131 3132 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) 3133 { 3134 struct hclge_rss_input_tuple_cmd *req; 3135 struct hclge_desc desc; 3136 int ret; 3137 3138 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 3139 3140 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 3141 3142 /* 
Get the tuple cfg from pf */ 3143 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; 3144 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; 3145 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; 3146 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; 3147 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; 3148 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; 3149 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; 3150 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; 3151 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3152 if (ret) { 3153 dev_err(&hdev->pdev->dev, 3154 "Configure rss input fail, status = %d\n", ret); 3155 return ret; 3156 } 3157 3158 return 0; 3159 } 3160 3161 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, 3162 u8 *key, u8 *hfunc) 3163 { 3164 struct hclge_vport *vport = hclge_get_vport(handle); 3165 int i; 3166 3167 /* Get hash algorithm */ 3168 if (hfunc) 3169 *hfunc = vport->rss_algo; 3170 3171 /* Get the RSS Key required by the user */ 3172 if (key) 3173 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); 3174 3175 /* Get indirect table */ 3176 if (indir) 3177 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3178 indir[i] = vport->rss_indirection_tbl[i]; 3179 3180 return 0; 3181 } 3182 3183 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, 3184 const u8 *key, const u8 hfunc) 3185 { 3186 struct hclge_vport *vport = hclge_get_vport(handle); 3187 struct hclge_dev *hdev = vport->back; 3188 u8 hash_algo; 3189 int ret, i; 3190 3191 /* Set the RSS Hash Key if specified by the user */ 3192 if (key) { 3193 3194 if (hfunc == ETH_RSS_HASH_TOP || 3195 hfunc == ETH_RSS_HASH_NO_CHANGE) 3196 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 3197 else 3198 return -EINVAL; 3199 ret = hclge_set_rss_algo_key(hdev, hash_algo, key); 3200 if (ret) 3201 return ret; 3202 3203 /* Update the shadow RSS key with the user specified key */ 3204 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); 3205 vport->rss_algo = hash_algo; 3206 } 3207 3208 /* Update the shadow RSS table with user specified qids */ 3209 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3210 vport->rss_indirection_tbl[i] = indir[i]; 3211 3212 /* Update the hardware */ 3213 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); 3214 } 3215 3216 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 3217 { 3218 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ?
HCLGE_S_PORT_BIT : 0; 3219 3220 if (nfc->data & RXH_L4_B_2_3) 3221 hash_sets |= HCLGE_D_PORT_BIT; 3222 else 3223 hash_sets &= ~HCLGE_D_PORT_BIT; 3224 3225 if (nfc->data & RXH_IP_SRC) 3226 hash_sets |= HCLGE_S_IP_BIT; 3227 else 3228 hash_sets &= ~HCLGE_S_IP_BIT; 3229 3230 if (nfc->data & RXH_IP_DST) 3231 hash_sets |= HCLGE_D_IP_BIT; 3232 else 3233 hash_sets &= ~HCLGE_D_IP_BIT; 3234 3235 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 3236 hash_sets |= HCLGE_V_TAG_BIT; 3237 3238 return hash_sets; 3239 } 3240 3241 static int hclge_set_rss_tuple(struct hnae3_handle *handle, 3242 struct ethtool_rxnfc *nfc) 3243 { 3244 struct hclge_vport *vport = hclge_get_vport(handle); 3245 struct hclge_dev *hdev = vport->back; 3246 struct hclge_rss_input_tuple_cmd *req; 3247 struct hclge_desc desc; 3248 u8 tuple_sets; 3249 int ret; 3250 3251 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | 3252 RXH_L4_B_0_1 | RXH_L4_B_2_3)) 3253 return -EINVAL; 3254 3255 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 3256 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 3257 3258 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; 3259 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; 3260 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; 3261 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; 3262 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; 3263 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; 3264 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; 3265 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; 3266 3267 tuple_sets = hclge_get_rss_hash_bits(nfc); 3268 switch (nfc->flow_type) { 3269 case TCP_V4_FLOW: 3270 req->ipv4_tcp_en = tuple_sets; 3271 break; 3272 case TCP_V6_FLOW: 3273 req->ipv6_tcp_en = tuple_sets; 3274 break; 3275 case UDP_V4_FLOW: 3276 req->ipv4_udp_en = tuple_sets; 3277 break; 3278 case UDP_V6_FLOW: 3279 req->ipv6_udp_en = tuple_sets; 3280 break; 3281 case SCTP_V4_FLOW: 3282 req->ipv4_sctp_en = tuple_sets; 3283 break; 3284 case SCTP_V6_FLOW: 3285 if ((nfc->data & RXH_L4_B_0_1) || 3286 (nfc->data & RXH_L4_B_2_3)) 3287 return -EINVAL; 3288 3289 req->ipv6_sctp_en = tuple_sets; 3290 break; 3291 case IPV4_FLOW: 3292 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3293 break; 3294 case IPV6_FLOW: 3295 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3296 break; 3297 default: 3298 return -EINVAL; 3299 } 3300 3301 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3302 if (ret) { 3303 dev_err(&hdev->pdev->dev, 3304 "Set rss tuple fail, status = %d\n", ret); 3305 return ret; 3306 } 3307 3308 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 3309 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 3310 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 3311 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 3312 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 3313 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 3314 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 3315 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 3316 return 0; 3317 } 3318 3319 static int hclge_get_rss_tuple(struct hnae3_handle *handle, 3320 struct ethtool_rxnfc *nfc) 3321 { 3322 struct hclge_vport *vport = hclge_get_vport(handle); 3323 u8 tuple_sets; 3324 3325 nfc->data = 0; 3326 3327 switch (nfc->flow_type) { 3328 case TCP_V4_FLOW: 3329 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en; 3330 break; 3331 case UDP_V4_FLOW: 3332 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; 3333 break; 3334 
case TCP_V6_FLOW: 3335 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; 3336 break; 3337 case UDP_V6_FLOW: 3338 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; 3339 break; 3340 case SCTP_V4_FLOW: 3341 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; 3342 break; 3343 case SCTP_V6_FLOW: 3344 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; 3345 break; 3346 case IPV4_FLOW: 3347 case IPV6_FLOW: 3348 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; 3349 break; 3350 default: 3351 return -EINVAL; 3352 } 3353 3354 if (!tuple_sets) 3355 return 0; 3356 3357 if (tuple_sets & HCLGE_D_PORT_BIT) 3358 nfc->data |= RXH_L4_B_2_3; 3359 if (tuple_sets & HCLGE_S_PORT_BIT) 3360 nfc->data |= RXH_L4_B_0_1; 3361 if (tuple_sets & HCLGE_D_IP_BIT) 3362 nfc->data |= RXH_IP_DST; 3363 if (tuple_sets & HCLGE_S_IP_BIT) 3364 nfc->data |= RXH_IP_SRC; 3365 3366 return 0; 3367 } 3368 3369 static int hclge_get_tc_size(struct hnae3_handle *handle) 3370 { 3371 struct hclge_vport *vport = hclge_get_vport(handle); 3372 struct hclge_dev *hdev = vport->back; 3373 3374 return hdev->rss_size_max; 3375 } 3376 3377 int hclge_rss_init_hw(struct hclge_dev *hdev) 3378 { 3379 struct hclge_vport *vport = hdev->vport; 3380 u8 *rss_indir = vport[0].rss_indirection_tbl; 3381 u16 rss_size = vport[0].alloc_rss_size; 3382 u8 *key = vport[0].rss_hash_key; 3383 u8 hfunc = vport[0].rss_algo; 3384 u16 tc_offset[HCLGE_MAX_TC_NUM]; 3385 u16 tc_valid[HCLGE_MAX_TC_NUM]; 3386 u16 tc_size[HCLGE_MAX_TC_NUM]; 3387 u16 roundup_size; 3388 int i, ret; 3389 3390 ret = hclge_set_rss_indir_table(hdev, rss_indir); 3391 if (ret) 3392 return ret; 3393 3394 ret = hclge_set_rss_algo_key(hdev, hfunc, key); 3395 if (ret) 3396 return ret; 3397 3398 ret = hclge_set_rss_input_tuple(hdev); 3399 if (ret) 3400 return ret; 3401 3402 /* Each TC has the same queue size, and tc_size set to hardware is 3403 * the log2 of rss_size rounded up to a power of two; the actual queue 3404 * size is limited by the indirection table.
3405 */ 3406 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { 3407 dev_err(&hdev->pdev->dev, 3408 "Configure rss tc size failed, invalid TC_SIZE = %d\n", 3409 rss_size); 3410 return -EINVAL; 3411 } 3412 3413 roundup_size = roundup_pow_of_two(rss_size); 3414 roundup_size = ilog2(roundup_size); 3415 3416 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 3417 tc_valid[i] = 0; 3418 3419 if (!(hdev->hw_tc_map & BIT(i))) 3420 continue; 3421 3422 tc_valid[i] = 1; 3423 tc_size[i] = roundup_size; 3424 tc_offset[i] = rss_size * i; 3425 } 3426 3427 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 3428 } 3429 3430 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) 3431 { 3432 struct hclge_vport *vport = hdev->vport; 3433 int i, j; 3434 3435 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { 3436 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3437 vport[j].rss_indirection_tbl[i] = 3438 i % vport[j].alloc_rss_size; 3439 } 3440 } 3441 3442 static void hclge_rss_init_cfg(struct hclge_dev *hdev) 3443 { 3444 struct hclge_vport *vport = hdev->vport; 3445 int i; 3446 3447 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 3448 vport[i].rss_tuple_sets.ipv4_tcp_en = 3449 HCLGE_RSS_INPUT_TUPLE_OTHER; 3450 vport[i].rss_tuple_sets.ipv4_udp_en = 3451 HCLGE_RSS_INPUT_TUPLE_OTHER; 3452 vport[i].rss_tuple_sets.ipv4_sctp_en = 3453 HCLGE_RSS_INPUT_TUPLE_SCTP; 3454 vport[i].rss_tuple_sets.ipv4_fragment_en = 3455 HCLGE_RSS_INPUT_TUPLE_OTHER; 3456 vport[i].rss_tuple_sets.ipv6_tcp_en = 3457 HCLGE_RSS_INPUT_TUPLE_OTHER; 3458 vport[i].rss_tuple_sets.ipv6_udp_en = 3459 HCLGE_RSS_INPUT_TUPLE_OTHER; 3460 vport[i].rss_tuple_sets.ipv6_sctp_en = 3461 HCLGE_RSS_INPUT_TUPLE_SCTP; 3462 vport[i].rss_tuple_sets.ipv6_fragment_en = 3463 HCLGE_RSS_INPUT_TUPLE_OTHER; 3464 3465 vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 3466 3467 netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE); 3468 } 3469 3470 hclge_rss_indir_init_cfg(hdev); 3471 } 3472 3473 int hclge_bind_ring_with_vector(struct hclge_vport *vport, 3474 int vector_id, bool en, 3475 struct hnae3_ring_chain_node *ring_chain) 3476 { 3477 struct hclge_dev *hdev = vport->back; 3478 struct hnae3_ring_chain_node *node; 3479 struct hclge_desc desc; 3480 struct hclge_ctrl_vector_chain_cmd *req 3481 = (struct hclge_ctrl_vector_chain_cmd *)desc.data; 3482 enum hclge_cmd_status status; 3483 enum hclge_opcode_type op; 3484 u16 tqp_type_and_id; 3485 int i; 3486 3487 op = en ? 
HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; 3488 hclge_cmd_setup_basic_desc(&desc, op, false); 3489 req->int_vector_id = vector_id; 3490 3491 i = 0; 3492 for (node = ring_chain; node; node = node->next) { 3493 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); 3494 hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, 3495 HCLGE_INT_TYPE_S, 3496 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); 3497 hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, 3498 HCLGE_TQP_ID_S, node->tqp_index); 3499 hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, 3500 HCLGE_INT_GL_IDX_S, 3501 hnae_get_field(node->int_gl_idx, 3502 HNAE3_RING_GL_IDX_M, 3503 HNAE3_RING_GL_IDX_S)); 3504 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); 3505 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { 3506 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; 3507 req->vfid = vport->vport_id; 3508 3509 status = hclge_cmd_send(&hdev->hw, &desc, 1); 3510 if (status) { 3511 dev_err(&hdev->pdev->dev, 3512 "Map TQP fail, status is %d.\n", 3513 status); 3514 return -EIO; 3515 } 3516 i = 0; 3517 3518 hclge_cmd_setup_basic_desc(&desc, 3519 op, 3520 false); 3521 req->int_vector_id = vector_id; 3522 } 3523 } 3524 3525 if (i > 0) { 3526 req->int_cause_num = i; 3527 req->vfid = vport->vport_id; 3528 status = hclge_cmd_send(&hdev->hw, &desc, 1); 3529 if (status) { 3530 dev_err(&hdev->pdev->dev, 3531 "Map TQP fail, status is %d.\n", status); 3532 return -EIO; 3533 } 3534 } 3535 3536 return 0; 3537 } 3538 3539 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, 3540 int vector, 3541 struct hnae3_ring_chain_node *ring_chain) 3542 { 3543 struct hclge_vport *vport = hclge_get_vport(handle); 3544 struct hclge_dev *hdev = vport->back; 3545 int vector_id; 3546 3547 vector_id = hclge_get_vector_index(hdev, vector); 3548 if (vector_id < 0) { 3549 dev_err(&hdev->pdev->dev, 3550 "Get vector index fail. vector_id =%d\n", vector_id); 3551 return vector_id; 3552 } 3553 3554 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); 3555 } 3556 3557 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, 3558 int vector, 3559 struct hnae3_ring_chain_node *ring_chain) 3560 { 3561 struct hclge_vport *vport = hclge_get_vport(handle); 3562 struct hclge_dev *hdev = vport->back; 3563 int vector_id, ret; 3564 3565 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 3566 return 0; 3567 3568 vector_id = hclge_get_vector_index(hdev, vector); 3569 if (vector_id < 0) { 3570 dev_err(&handle->pdev->dev, 3571 "Get vector index fail. ret =%d\n", vector_id); 3572 return vector_id; 3573 } 3574 3575 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); 3576 if (ret) 3577 dev_err(&handle->pdev->dev, 3578 "Unmap ring from vector fail. vectorid=%d, ret =%d\n", 3579 vector_id, 3580 ret); 3581 3582 return ret; 3583 } 3584 3585 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, 3586 struct hclge_promisc_param *param) 3587 { 3588 struct hclge_promisc_cfg_cmd *req; 3589 struct hclge_desc desc; 3590 int ret; 3591 3592 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); 3593 3594 req = (struct hclge_promisc_cfg_cmd *)desc.data; 3595 req->vf_id = param->vf_id; 3596 3597 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on 3598 * pdev revision(0x20), new revision support them. The 3599 * value of this two fields will not return error when driver 3600 * send command to fireware in revision(0x20). 
3601 */ 3602 req->flag = (param->enable << HCLGE_PROMISC_EN_B) | 3603 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; 3604 3605 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3606 if (ret) { 3607 dev_err(&hdev->pdev->dev, 3608 "Set promisc mode fail, status is %d.\n", ret); 3609 return ret; 3610 } 3611 return 0; 3612 } 3613 3614 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, 3615 bool en_mc, bool en_bc, int vport_id) 3616 { 3617 if (!param) 3618 return; 3619 3620 memset(param, 0, sizeof(struct hclge_promisc_param)); 3621 if (en_uc) 3622 param->enable = HCLGE_PROMISC_EN_UC; 3623 if (en_mc) 3624 param->enable |= HCLGE_PROMISC_EN_MC; 3625 if (en_bc) 3626 param->enable |= HCLGE_PROMISC_EN_BC; 3627 param->vf_id = vport_id; 3628 } 3629 3630 static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 3631 bool en_mc_pmc) 3632 { 3633 struct hclge_vport *vport = hclge_get_vport(handle); 3634 struct hclge_dev *hdev = vport->back; 3635 struct hclge_promisc_param param; 3636 3637 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true, 3638 vport->vport_id); 3639 hclge_cmd_set_promisc_mode(hdev, ¶m); 3640 } 3641 3642 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) 3643 { 3644 struct hclge_desc desc; 3645 struct hclge_config_mac_mode_cmd *req = 3646 (struct hclge_config_mac_mode_cmd *)desc.data; 3647 u32 loop_en = 0; 3648 int ret; 3649 3650 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); 3651 hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); 3652 hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); 3653 hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); 3654 hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); 3655 hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); 3656 hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); 3657 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); 3658 hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); 3659 hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); 3660 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); 3661 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); 3662 hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); 3663 hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); 3664 hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); 3665 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 3666 3667 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3668 if (ret) 3669 dev_err(&hdev->pdev->dev, 3670 "mac enable fail, ret =%d.\n", ret); 3671 } 3672 3673 static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) 3674 { 3675 struct hclge_config_mac_mode_cmd *req; 3676 struct hclge_desc desc; 3677 u32 loop_en; 3678 int ret; 3679 3680 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; 3681 /* 1 Read out the MAC mode config at first */ 3682 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 3683 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3684 if (ret) { 3685 dev_err(&hdev->pdev->dev, 3686 "mac loopback get fail, ret =%d.\n", ret); 3687 return ret; 3688 } 3689 3690 /* 2 Then setup the loopback flag */ 3691 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); 3692 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 
1 : 0); 3693 3694 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 3695 3696 /* 3 Config mac work mode with loopback flag 3697 * and its original configure parameters 3698 */ 3699 hclge_cmd_reuse_desc(&desc, false); 3700 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3701 if (ret) 3702 dev_err(&hdev->pdev->dev, 3703 "mac loopback set fail, ret =%d.\n", ret); 3704 return ret; 3705 } 3706 3707 static int hclge_set_loopback(struct hnae3_handle *handle, 3708 enum hnae3_loop loop_mode, bool en) 3709 { 3710 struct hclge_vport *vport = hclge_get_vport(handle); 3711 struct hclge_dev *hdev = vport->back; 3712 int ret; 3713 3714 switch (loop_mode) { 3715 case HNAE3_MAC_INTER_LOOP_MAC: 3716 ret = hclge_set_mac_loopback(hdev, en); 3717 break; 3718 default: 3719 ret = -ENOTSUPP; 3720 dev_err(&hdev->pdev->dev, 3721 "loop_mode %d is not supported\n", loop_mode); 3722 break; 3723 } 3724 3725 return ret; 3726 } 3727 3728 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, 3729 int stream_id, bool enable) 3730 { 3731 struct hclge_desc desc; 3732 struct hclge_cfg_com_tqp_queue_cmd *req = 3733 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; 3734 int ret; 3735 3736 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); 3737 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); 3738 req->stream_id = cpu_to_le16(stream_id); 3739 req->enable |= enable << HCLGE_TQP_ENABLE_B; 3740 3741 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3742 if (ret) 3743 dev_err(&hdev->pdev->dev, 3744 "Tqp enable fail, status =%d.\n", ret); 3745 return ret; 3746 } 3747 3748 static void hclge_reset_tqp_stats(struct hnae3_handle *handle) 3749 { 3750 struct hclge_vport *vport = hclge_get_vport(handle); 3751 struct hnae3_queue *queue; 3752 struct hclge_tqp *tqp; 3753 int i; 3754 3755 for (i = 0; i < vport->alloc_tqps; i++) { 3756 queue = handle->kinfo.tqp[i]; 3757 tqp = container_of(queue, struct hclge_tqp, q); 3758 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 3759 } 3760 } 3761 3762 static int hclge_ae_start(struct hnae3_handle *handle) 3763 { 3764 struct hclge_vport *vport = hclge_get_vport(handle); 3765 struct hclge_dev *hdev = vport->back; 3766 int i, ret; 3767 3768 for (i = 0; i < vport->alloc_tqps; i++) 3769 hclge_tqp_enable(hdev, i, 0, true); 3770 3771 /* mac enable */ 3772 hclge_cfg_mac_mode(hdev, true); 3773 clear_bit(HCLGE_STATE_DOWN, &hdev->state); 3774 mod_timer(&hdev->service_timer, jiffies + HZ); 3775 hdev->hw.mac.link = 0; 3776 3777 /* reset tqp stats */ 3778 hclge_reset_tqp_stats(handle); 3779 3780 ret = hclge_mac_start_phy(hdev); 3781 if (ret) 3782 return ret; 3783 3784 return 0; 3785 } 3786 3787 static void hclge_ae_stop(struct hnae3_handle *handle) 3788 { 3789 struct hclge_vport *vport = hclge_get_vport(handle); 3790 struct hclge_dev *hdev = vport->back; 3791 int i; 3792 3793 del_timer_sync(&hdev->service_timer); 3794 cancel_work_sync(&hdev->service_task); 3795 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); 3796 3797 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { 3798 hclge_mac_stop_phy(hdev); 3799 return; 3800 } 3801 3802 for (i = 0; i < vport->alloc_tqps; i++) 3803 hclge_tqp_enable(hdev, i, 0, false); 3804 3805 /* Mac disable */ 3806 hclge_cfg_mac_mode(hdev, false); 3807 3808 hclge_mac_stop_phy(hdev); 3809 3810 /* reset tqp stats */ 3811 hclge_reset_tqp_stats(handle); 3812 del_timer_sync(&hdev->service_timer); 3813 cancel_work_sync(&hdev->service_task); 3814 hclge_update_link_status(hdev); 3815 } 3816 3817 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, 
3818 u16 cmdq_resp, u8 resp_code, 3819 enum hclge_mac_vlan_tbl_opcode op) 3820 { 3821 struct hclge_dev *hdev = vport->back; 3822 int return_status = -EIO; 3823 3824 if (cmdq_resp) { 3825 dev_err(&hdev->pdev->dev, 3826 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", 3827 cmdq_resp); 3828 return -EIO; 3829 } 3830 3831 if (op == HCLGE_MAC_VLAN_ADD) { 3832 if ((!resp_code) || (resp_code == 1)) { 3833 return_status = 0; 3834 } else if (resp_code == 2) { 3835 return_status = -ENOSPC; 3836 dev_err(&hdev->pdev->dev, 3837 "add mac addr failed for uc_overflow.\n"); 3838 } else if (resp_code == 3) { 3839 return_status = -ENOSPC; 3840 dev_err(&hdev->pdev->dev, 3841 "add mac addr failed for mc_overflow.\n"); 3842 } else { 3843 dev_err(&hdev->pdev->dev, 3844 "add mac addr failed for undefined, code=%d.\n", 3845 resp_code); 3846 } 3847 } else if (op == HCLGE_MAC_VLAN_REMOVE) { 3848 if (!resp_code) { 3849 return_status = 0; 3850 } else if (resp_code == 1) { 3851 return_status = -ENOENT; 3852 dev_dbg(&hdev->pdev->dev, 3853 "remove mac addr failed for miss.\n"); 3854 } else { 3855 dev_err(&hdev->pdev->dev, 3856 "remove mac addr failed for undefined, code=%d.\n", 3857 resp_code); 3858 } 3859 } else if (op == HCLGE_MAC_VLAN_LKUP) { 3860 if (!resp_code) { 3861 return_status = 0; 3862 } else if (resp_code == 1) { 3863 return_status = -ENOENT; 3864 dev_dbg(&hdev->pdev->dev, 3865 "lookup mac addr failed for miss.\n"); 3866 } else { 3867 dev_err(&hdev->pdev->dev, 3868 "lookup mac addr failed for undefined, code=%d.\n", 3869 resp_code); 3870 } 3871 } else { 3872 return_status = -EINVAL; 3873 dev_err(&hdev->pdev->dev, 3874 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", 3875 op); 3876 } 3877 3878 return return_status; 3879 } 3880 3881 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) 3882 { 3883 int word_num; 3884 int bit_num; 3885 3886 if (vfid > 255 || vfid < 0) 3887 return -EIO; 3888 3889 if (vfid >= 0 && vfid <= 191) { 3890 word_num = vfid / 32; 3891 bit_num = vfid % 32; 3892 if (clr) 3893 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 3894 else 3895 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); 3896 } else { 3897 word_num = (vfid - 192) / 32; 3898 bit_num = vfid % 32; 3899 if (clr) 3900 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 3901 else 3902 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); 3903 } 3904 3905 return 0; 3906 } 3907 3908 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) 3909 { 3910 #define HCLGE_DESC_NUMBER 3 3911 #define HCLGE_FUNC_NUMBER_PER_DESC 6 3912 int i, j; 3913 3914 for (i = 0; i < HCLGE_DESC_NUMBER; i++) 3915 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) 3916 if (desc[i].data[j]) 3917 return false; 3918 3919 return true; 3920 } 3921 3922 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, 3923 const u8 *addr) 3924 { 3925 const unsigned char *mac_addr = addr; 3926 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | 3927 (mac_addr[0]) | (mac_addr[1] << 8); 3928 u32 low_val = mac_addr[4] | (mac_addr[5] << 8); 3929 3930 new_req->mac_addr_hi32 = cpu_to_le32(high_val); 3931 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); 3932 } 3933 3934 static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, 3935 const u8 *addr) 3936 { 3937 u16 high_val = addr[1] | (addr[0] << 8); 3938 struct hclge_dev *hdev = vport->back; 3939 u32 rsh = 4 - hdev->mta_mac_sel_type; 3940 u16 ret_val = (high_val >> rsh) & 0xfff; 3941 3942 return ret_val; 3943 } 3944 
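/* hclge_set_mta_filter_mode - configure the MTA (multicast table array)
 * filter: mta_mac_sel selects which bits of the destination MAC address
 * are used as the MTA table index (see hclge_get_mac_addr_to_mta_index()
 * above), and enable switches MTA based multicast filtering on or off.
 */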
3945 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, 3946 enum hclge_mta_dmac_sel_type mta_mac_sel, 3947 bool enable) 3948 { 3949 struct hclge_mta_filter_mode_cmd *req; 3950 struct hclge_desc desc; 3951 int ret; 3952 3953 req = (struct hclge_mta_filter_mode_cmd *)desc.data; 3954 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); 3955 3956 hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, 3957 enable); 3958 hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, 3959 HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); 3960 3961 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3962 if (ret) { 3963 dev_err(&hdev->pdev->dev, 3964 "Config mat filter mode failed for cmd_send, ret =%d.\n", 3965 ret); 3966 return ret; 3967 } 3968 3969 return 0; 3970 } 3971 3972 int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, 3973 u8 func_id, 3974 bool enable) 3975 { 3976 struct hclge_cfg_func_mta_filter_cmd *req; 3977 struct hclge_desc desc; 3978 int ret; 3979 3980 req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; 3981 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); 3982 3983 hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, 3984 enable); 3985 req->function_id = func_id; 3986 3987 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3988 if (ret) { 3989 dev_err(&hdev->pdev->dev, 3990 "Config func_id enable failed for cmd_send, ret =%d.\n", 3991 ret); 3992 return ret; 3993 } 3994 3995 return 0; 3996 } 3997 3998 static int hclge_set_mta_table_item(struct hclge_vport *vport, 3999 u16 idx, 4000 bool enable) 4001 { 4002 struct hclge_dev *hdev = vport->back; 4003 struct hclge_cfg_func_mta_item_cmd *req; 4004 struct hclge_desc desc; 4005 u16 item_idx = 0; 4006 int ret; 4007 4008 req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; 4009 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); 4010 hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); 4011 4012 hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, 4013 HCLGE_CFG_MTA_ITEM_IDX_S, idx); 4014 req->item_idx = cpu_to_le16(item_idx); 4015 4016 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4017 if (ret) { 4018 dev_err(&hdev->pdev->dev, 4019 "Config mta table item failed for cmd_send, ret =%d.\n", 4020 ret); 4021 return ret; 4022 } 4023 4024 if (enable) 4025 set_bit(idx, vport->mta_shadow); 4026 else 4027 clear_bit(idx, vport->mta_shadow); 4028 4029 return 0; 4030 } 4031 4032 static int hclge_update_mta_status(struct hnae3_handle *handle) 4033 { 4034 unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; 4035 struct hclge_vport *vport = hclge_get_vport(handle); 4036 struct net_device *netdev = handle->kinfo.netdev; 4037 struct netdev_hw_addr *ha; 4038 u16 tbl_idx; 4039 4040 memset(mta_status, 0, sizeof(mta_status)); 4041 4042 /* update mta_status from mc addr list */ 4043 netdev_for_each_mc_addr(ha, netdev) { 4044 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr); 4045 set_bit(tbl_idx, mta_status); 4046 } 4047 4048 return hclge_update_mta_status_common(vport, mta_status, 4049 0, HCLGE_MTA_TBL_SIZE, true); 4050 } 4051 4052 int hclge_update_mta_status_common(struct hclge_vport *vport, 4053 unsigned long *status, 4054 u16 idx, 4055 u16 count, 4056 bool update_filter) 4057 { 4058 struct hclge_dev *hdev = vport->back; 4059 u16 update_max = idx + count; 4060 u16 check_max; 4061 int ret = 0; 4062 bool used; 4063 u16 i; 4064 4065 /* setup mta check range */ 4066 if (update_filter) { 4067 i = 0; 4068 check_max = HCLGE_MTA_TBL_SIZE; 4069 } else { 4070 i = idx; 4071 check_max = update_max; 
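/* Note on the two ranges (inferred from the loop below): when update_filter
 * is set, the whole MTA shadow table is walked, so "used" ends up reflecting
 * every entry this vport still has set and the per-function filter can be
 * switched off once none remain; when it is clear, only the caller-supplied
 * window [idx, idx + count) is rechecked against the new status bitmap.
 */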
4072 } 4073 4074 used = false; 4075 /* check and update all mta item */ 4076 for (; i < check_max; i++) { 4077 /* ignore unused item */ 4078 if (!test_bit(i, vport->mta_shadow)) 4079 continue; 4080 4081 /* if i in update range then update it */ 4082 if (i >= idx && i < update_max) 4083 if (!test_bit(i - idx, status)) 4084 hclge_set_mta_table_item(vport, i, false); 4085 4086 if (!used && test_bit(i, vport->mta_shadow)) 4087 used = true; 4088 } 4089 4090 /* no longer use mta, disable it */ 4091 if (vport->accept_mta_mc && update_filter && !used) { 4092 ret = hclge_cfg_func_mta_filter(hdev, 4093 vport->vport_id, 4094 false); 4095 if (ret) 4096 dev_err(&hdev->pdev->dev, 4097 "disable func mta filter fail ret=%d\n", 4098 ret); 4099 else 4100 vport->accept_mta_mc = false; 4101 } 4102 4103 return ret; 4104 } 4105 4106 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, 4107 struct hclge_mac_vlan_tbl_entry_cmd *req) 4108 { 4109 struct hclge_dev *hdev = vport->back; 4110 struct hclge_desc desc; 4111 u8 resp_code; 4112 u16 retval; 4113 int ret; 4114 4115 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); 4116 4117 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4118 4119 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4120 if (ret) { 4121 dev_err(&hdev->pdev->dev, 4122 "del mac addr failed for cmd_send, ret =%d.\n", 4123 ret); 4124 return ret; 4125 } 4126 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 4127 retval = le16_to_cpu(desc.retval); 4128 4129 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 4130 HCLGE_MAC_VLAN_REMOVE); 4131 } 4132 4133 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, 4134 struct hclge_mac_vlan_tbl_entry_cmd *req, 4135 struct hclge_desc *desc, 4136 bool is_mc) 4137 { 4138 struct hclge_dev *hdev = vport->back; 4139 u8 resp_code; 4140 u16 retval; 4141 int ret; 4142 4143 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); 4144 if (is_mc) { 4145 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4146 memcpy(desc[0].data, 4147 req, 4148 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4149 hclge_cmd_setup_basic_desc(&desc[1], 4150 HCLGE_OPC_MAC_VLAN_ADD, 4151 true); 4152 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4153 hclge_cmd_setup_basic_desc(&desc[2], 4154 HCLGE_OPC_MAC_VLAN_ADD, 4155 true); 4156 ret = hclge_cmd_send(&hdev->hw, desc, 3); 4157 } else { 4158 memcpy(desc[0].data, 4159 req, 4160 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4161 ret = hclge_cmd_send(&hdev->hw, desc, 1); 4162 } 4163 if (ret) { 4164 dev_err(&hdev->pdev->dev, 4165 "lookup mac addr failed for cmd_send, ret =%d.\n", 4166 ret); 4167 return ret; 4168 } 4169 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; 4170 retval = le16_to_cpu(desc[0].retval); 4171 4172 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 4173 HCLGE_MAC_VLAN_LKUP); 4174 } 4175 4176 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, 4177 struct hclge_mac_vlan_tbl_entry_cmd *req, 4178 struct hclge_desc *mc_desc) 4179 { 4180 struct hclge_dev *hdev = vport->back; 4181 int cfg_status; 4182 u8 resp_code; 4183 u16 retval; 4184 int ret; 4185 4186 if (!mc_desc) { 4187 struct hclge_desc desc; 4188 4189 hclge_cmd_setup_basic_desc(&desc, 4190 HCLGE_OPC_MAC_VLAN_ADD, 4191 false); 4192 memcpy(desc.data, req, 4193 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4194 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4195 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 4196 retval = le16_to_cpu(desc.retval); 4197 4198 cfg_status = 
hclge_get_mac_vlan_cmd_status(vport, retval, 4199 resp_code, 4200 HCLGE_MAC_VLAN_ADD); 4201 } else { 4202 hclge_cmd_reuse_desc(&mc_desc[0], false); 4203 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4204 hclge_cmd_reuse_desc(&mc_desc[1], false); 4205 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4206 hclge_cmd_reuse_desc(&mc_desc[2], false); 4207 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); 4208 memcpy(mc_desc[0].data, req, 4209 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4210 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); 4211 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; 4212 retval = le16_to_cpu(mc_desc[0].retval); 4213 4214 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 4215 resp_code, 4216 HCLGE_MAC_VLAN_ADD); 4217 } 4218 4219 if (ret) { 4220 dev_err(&hdev->pdev->dev, 4221 "add mac addr failed for cmd_send, ret =%d.\n", 4222 ret); 4223 return ret; 4224 } 4225 4226 return cfg_status; 4227 } 4228 4229 static int hclge_add_uc_addr(struct hnae3_handle *handle, 4230 const unsigned char *addr) 4231 { 4232 struct hclge_vport *vport = hclge_get_vport(handle); 4233 4234 return hclge_add_uc_addr_common(vport, addr); 4235 } 4236 4237 int hclge_add_uc_addr_common(struct hclge_vport *vport, 4238 const unsigned char *addr) 4239 { 4240 struct hclge_dev *hdev = vport->back; 4241 struct hclge_mac_vlan_tbl_entry_cmd req; 4242 struct hclge_desc desc; 4243 u16 egress_port = 0; 4244 int ret; 4245 4246 /* mac addr check */ 4247 if (is_zero_ether_addr(addr) || 4248 is_broadcast_ether_addr(addr) || 4249 is_multicast_ether_addr(addr)) { 4250 dev_err(&hdev->pdev->dev, 4251 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", 4252 addr, 4253 is_zero_ether_addr(addr), 4254 is_broadcast_ether_addr(addr), 4255 is_multicast_ether_addr(addr)); 4256 return -EINVAL; 4257 } 4258 4259 memset(&req, 0, sizeof(req)); 4260 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4261 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4262 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0); 4263 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4264 4265 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0); 4266 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0); 4267 hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, 4268 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); 4269 hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M, 4270 HCLGE_MAC_EPORT_PFID_S, 0); 4271 4272 req.egress_port = cpu_to_le16(egress_port); 4273 4274 hclge_prepare_mac_addr(&req, addr); 4275 4276 /* Lookup the mac address in the mac_vlan table, and add 4277 * it if the entry is inexistent. Repeated unicast entry 4278 * is not allowed in the mac vlan table. 
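 * So a lookup result of -ENOENT means the address is free and a new
 * entry is written, while a result of 0 means a duplicate was found
 * and it is reported back to the caller as -EINVAL.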
4279 */ 4280 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); 4281 if (ret == -ENOENT) 4282 return hclge_add_mac_vlan_tbl(vport, &req, NULL); 4283 4284 /* check if we just hit the duplicate */ 4285 if (!ret) 4286 ret = -EINVAL; 4287 4288 dev_err(&hdev->pdev->dev, 4289 "PF failed to add unicast entry(%pM) in the MAC table\n", 4290 addr); 4291 4292 return ret; 4293 } 4294 4295 static int hclge_rm_uc_addr(struct hnae3_handle *handle, 4296 const unsigned char *addr) 4297 { 4298 struct hclge_vport *vport = hclge_get_vport(handle); 4299 4300 return hclge_rm_uc_addr_common(vport, addr); 4301 } 4302 4303 int hclge_rm_uc_addr_common(struct hclge_vport *vport, 4304 const unsigned char *addr) 4305 { 4306 struct hclge_dev *hdev = vport->back; 4307 struct hclge_mac_vlan_tbl_entry_cmd req; 4308 int ret; 4309 4310 /* mac addr check */ 4311 if (is_zero_ether_addr(addr) || 4312 is_broadcast_ether_addr(addr) || 4313 is_multicast_ether_addr(addr)) { 4314 dev_dbg(&hdev->pdev->dev, 4315 "Remove mac err! invalid mac:%pM.\n", 4316 addr); 4317 return -EINVAL; 4318 } 4319 4320 memset(&req, 0, sizeof(req)); 4321 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4322 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4323 hclge_prepare_mac_addr(&req, addr); 4324 ret = hclge_remove_mac_vlan_tbl(vport, &req); 4325 4326 return ret; 4327 } 4328 4329 static int hclge_add_mc_addr(struct hnae3_handle *handle, 4330 const unsigned char *addr) 4331 { 4332 struct hclge_vport *vport = hclge_get_vport(handle); 4333 4334 return hclge_add_mc_addr_common(vport, addr); 4335 } 4336 4337 int hclge_add_mc_addr_common(struct hclge_vport *vport, 4338 const unsigned char *addr) 4339 { 4340 struct hclge_dev *hdev = vport->back; 4341 struct hclge_mac_vlan_tbl_entry_cmd req; 4342 struct hclge_desc desc[3]; 4343 u16 tbl_idx; 4344 int status; 4345 4346 /* mac addr check */ 4347 if (!is_multicast_ether_addr(addr)) { 4348 dev_err(&hdev->pdev->dev, 4349 "Add mc mac err! 
invalid mac:%pM.\n", 4350 addr); 4351 return -EINVAL; 4352 } 4353 memset(&req, 0, sizeof(req)); 4354 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4355 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4356 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 4357 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4358 hclge_prepare_mac_addr(&req, addr); 4359 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 4360 if (!status) { 4361 /* This mac addr exist, update VFID for it */ 4362 hclge_update_desc_vfid(desc, vport->vport_id, false); 4363 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 4364 } else { 4365 /* This mac addr do not exist, add new entry for it */ 4366 memset(desc[0].data, 0, sizeof(desc[0].data)); 4367 memset(desc[1].data, 0, sizeof(desc[0].data)); 4368 memset(desc[2].data, 0, sizeof(desc[0].data)); 4369 hclge_update_desc_vfid(desc, vport->vport_id, false); 4370 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 4371 } 4372 4373 /* If mc mac vlan table is full, use MTA table */ 4374 if (status == -ENOSPC) { 4375 if (!vport->accept_mta_mc) { 4376 status = hclge_cfg_func_mta_filter(hdev, 4377 vport->vport_id, 4378 true); 4379 if (status) { 4380 dev_err(&hdev->pdev->dev, 4381 "set mta filter mode fail ret=%d\n", 4382 status); 4383 return status; 4384 } 4385 vport->accept_mta_mc = true; 4386 } 4387 4388 /* Set MTA table for this MAC address */ 4389 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); 4390 status = hclge_set_mta_table_item(vport, tbl_idx, true); 4391 } 4392 4393 return status; 4394 } 4395 4396 static int hclge_rm_mc_addr(struct hnae3_handle *handle, 4397 const unsigned char *addr) 4398 { 4399 struct hclge_vport *vport = hclge_get_vport(handle); 4400 4401 return hclge_rm_mc_addr_common(vport, addr); 4402 } 4403 4404 int hclge_rm_mc_addr_common(struct hclge_vport *vport, 4405 const unsigned char *addr) 4406 { 4407 struct hclge_dev *hdev = vport->back; 4408 struct hclge_mac_vlan_tbl_entry_cmd req; 4409 enum hclge_cmd_status status; 4410 struct hclge_desc desc[3]; 4411 4412 /* mac addr check */ 4413 if (!is_multicast_ether_addr(addr)) { 4414 dev_dbg(&hdev->pdev->dev, 4415 "Remove mc mac err! invalid mac:%pM.\n", 4416 addr); 4417 return -EINVAL; 4418 } 4419 4420 memset(&req, 0, sizeof(req)); 4421 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4422 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4423 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 4424 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4425 hclge_prepare_mac_addr(&req, addr); 4426 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 4427 if (!status) { 4428 /* This mac addr exist, remove this handle's VFID for it */ 4429 hclge_update_desc_vfid(desc, vport->vport_id, true); 4430 4431 if (hclge_is_all_function_id_zero(desc)) 4432 /* All the vfid is zero, so need to delete this entry */ 4433 status = hclge_remove_mac_vlan_tbl(vport, &req); 4434 else 4435 /* Not all the vfid is zero, update the vfid */ 4436 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 4437 4438 } else { 4439 /* Maybe this mac address is in mta table, but it cannot be 4440 * deleted here because an entry of mta represents an address 4441 * range rather than a specific address. the delete action to 4442 * all entries will take effect in update_mta_status called by 4443 * hns3_nic_set_rx_mode. 
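 * Returning 0 here is therefore intentional: the stale MTA bit is left
 * in place for now and is cleared the next time the multicast list is
 * resynchronized and the corresponding range no longer matches any
 * address on the list.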
4444 */ 4445 status = 0; 4446 } 4447 4448 return status; 4449 } 4450 4451 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, 4452 u16 cmdq_resp, u8 resp_code) 4453 { 4454 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 4455 #define HCLGE_ETHERTYPE_ALREADY_ADD 1 4456 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 4457 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 4458 4459 int return_status; 4460 4461 if (cmdq_resp) { 4462 dev_err(&hdev->pdev->dev, 4463 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", 4464 cmdq_resp); 4465 return -EIO; 4466 } 4467 4468 switch (resp_code) { 4469 case HCLGE_ETHERTYPE_SUCCESS_ADD: 4470 case HCLGE_ETHERTYPE_ALREADY_ADD: 4471 return_status = 0; 4472 break; 4473 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: 4474 dev_err(&hdev->pdev->dev, 4475 "add mac ethertype failed for manager table overflow.\n"); 4476 return_status = -EIO; 4477 break; 4478 case HCLGE_ETHERTYPE_KEY_CONFLICT: 4479 dev_err(&hdev->pdev->dev, 4480 "add mac ethertype failed for key conflict.\n"); 4481 return_status = -EIO; 4482 break; 4483 default: 4484 dev_err(&hdev->pdev->dev, 4485 "add mac ethertype failed for undefined, code=%d.\n", 4486 resp_code); 4487 return_status = -EIO; 4488 } 4489 4490 return return_status; 4491 } 4492 4493 static int hclge_add_mgr_tbl(struct hclge_dev *hdev, 4494 const struct hclge_mac_mgr_tbl_entry_cmd *req) 4495 { 4496 struct hclge_desc desc; 4497 u8 resp_code; 4498 u16 retval; 4499 int ret; 4500 4501 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); 4502 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); 4503 4504 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4505 if (ret) { 4506 dev_err(&hdev->pdev->dev, 4507 "add mac ethertype failed for cmd_send, ret =%d.\n", 4508 ret); 4509 return ret; 4510 } 4511 4512 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 4513 retval = le16_to_cpu(desc.retval); 4514 4515 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); 4516 } 4517 4518 static int init_mgr_tbl(struct hclge_dev *hdev) 4519 { 4520 int ret; 4521 int i; 4522 4523 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { 4524 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); 4525 if (ret) { 4526 dev_err(&hdev->pdev->dev, 4527 "add mac ethertype failed, ret =%d.\n", 4528 ret); 4529 return ret; 4530 } 4531 } 4532 4533 return 0; 4534 } 4535 4536 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) 4537 { 4538 struct hclge_vport *vport = hclge_get_vport(handle); 4539 struct hclge_dev *hdev = vport->back; 4540 4541 ether_addr_copy(p, hdev->hw.mac.mac_addr); 4542 } 4543 4544 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, 4545 bool is_first) 4546 { 4547 const unsigned char *new_addr = (const unsigned char *)p; 4548 struct hclge_vport *vport = hclge_get_vport(handle); 4549 struct hclge_dev *hdev = vport->back; 4550 int ret; 4551 4552 /* mac addr check */ 4553 if (is_zero_ether_addr(new_addr) || 4554 is_broadcast_ether_addr(new_addr) || 4555 is_multicast_ether_addr(new_addr)) { 4556 dev_err(&hdev->pdev->dev, 4557 "Change uc mac err! 
invalid mac:%p.\n", 4558 new_addr); 4559 return -EINVAL; 4560 } 4561 4562 if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) 4563 dev_warn(&hdev->pdev->dev, 4564 "remove old uc mac address fail.\n"); 4565 4566 ret = hclge_add_uc_addr(handle, new_addr); 4567 if (ret) { 4568 dev_err(&hdev->pdev->dev, 4569 "add uc mac address fail, ret =%d.\n", 4570 ret); 4571 4572 if (!is_first && 4573 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) 4574 dev_err(&hdev->pdev->dev, 4575 "restore uc mac address fail.\n"); 4576 4577 return -EIO; 4578 } 4579 4580 ret = hclge_pause_addr_cfg(hdev, new_addr); 4581 if (ret) { 4582 dev_err(&hdev->pdev->dev, 4583 "configure mac pause address fail, ret =%d.\n", 4584 ret); 4585 return -EIO; 4586 } 4587 4588 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); 4589 4590 return 0; 4591 } 4592 4593 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, 4594 bool filter_en) 4595 { 4596 struct hclge_vlan_filter_ctrl_cmd *req; 4597 struct hclge_desc desc; 4598 int ret; 4599 4600 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); 4601 4602 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; 4603 req->vlan_type = vlan_type; 4604 req->vlan_fe = filter_en; 4605 4606 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4607 if (ret) { 4608 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", 4609 ret); 4610 return ret; 4611 } 4612 4613 return 0; 4614 } 4615 4616 #define HCLGE_FILTER_TYPE_VF 0 4617 #define HCLGE_FILTER_TYPE_PORT 1 4618 4619 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 4620 { 4621 struct hclge_vport *vport = hclge_get_vport(handle); 4622 struct hclge_dev *hdev = vport->back; 4623 4624 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable); 4625 } 4626 4627 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, 4628 bool is_kill, u16 vlan, u8 qos, 4629 __be16 proto) 4630 { 4631 #define HCLGE_MAX_VF_BYTES 16 4632 struct hclge_vlan_filter_vf_cfg_cmd *req0; 4633 struct hclge_vlan_filter_vf_cfg_cmd *req1; 4634 struct hclge_desc desc[2]; 4635 u8 vf_byte_val; 4636 u8 vf_byte_off; 4637 int ret; 4638 4639 hclge_cmd_setup_basic_desc(&desc[0], 4640 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 4641 hclge_cmd_setup_basic_desc(&desc[1], 4642 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 4643 4644 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4645 4646 vf_byte_off = vfid / 8; 4647 vf_byte_val = 1 << (vfid % 8); 4648 4649 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; 4650 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; 4651 4652 req0->vlan_id = cpu_to_le16(vlan); 4653 req0->vlan_cfg = is_kill; 4654 4655 if (vf_byte_off < HCLGE_MAX_VF_BYTES) 4656 req0->vf_bitmap[vf_byte_off] = vf_byte_val; 4657 else 4658 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; 4659 4660 ret = hclge_cmd_send(&hdev->hw, desc, 2); 4661 if (ret) { 4662 dev_err(&hdev->pdev->dev, 4663 "Send vf vlan command fail, ret =%d.\n", 4664 ret); 4665 return ret; 4666 } 4667 4668 if (!is_kill) { 4669 #define HCLGE_VF_VLAN_NO_ENTRY 2 4670 if (!req0->resp_code || req0->resp_code == 1) 4671 return 0; 4672 4673 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { 4674 dev_warn(&hdev->pdev->dev, 4675 "vf vlan table is full, vf vlan filter is disabled\n"); 4676 return 0; 4677 } 4678 4679 dev_err(&hdev->pdev->dev, 4680 "Add vf vlan filter fail, ret =%d.\n", 4681 req0->resp_code); 4682 } else { 4683 if (!req0->resp_code) 4684 return 0; 4685 4686 dev_err(&hdev->pdev->dev, 4687 "Kill vf vlan filter 
fail, ret =%d.\n", 4688 req0->resp_code); 4689 } 4690 4691 return -EIO; 4692 } 4693 4694 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, 4695 u16 vlan_id, bool is_kill) 4696 { 4697 struct hclge_vlan_filter_pf_cfg_cmd *req; 4698 struct hclge_desc desc; 4699 u8 vlan_offset_byte_val; 4700 u8 vlan_offset_byte; 4701 u8 vlan_offset_160; 4702 int ret; 4703 4704 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); 4705 4706 vlan_offset_160 = vlan_id / 160; 4707 vlan_offset_byte = (vlan_id % 160) / 8; 4708 vlan_offset_byte_val = 1 << (vlan_id % 8); 4709 4710 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; 4711 req->vlan_offset = vlan_offset_160; 4712 req->vlan_cfg = is_kill; 4713 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 4714 4715 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4716 if (ret) 4717 dev_err(&hdev->pdev->dev, 4718 "port vlan command, send fail, ret =%d.\n", ret); 4719 return ret; 4720 } 4721 4722 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, 4723 u16 vport_id, u16 vlan_id, u8 qos, 4724 bool is_kill) 4725 { 4726 u16 vport_idx, vport_num = 0; 4727 int ret; 4728 4729 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, 4730 0, proto); 4731 if (ret) { 4732 dev_err(&hdev->pdev->dev, 4733 "Set %d vport vlan filter config fail, ret =%d.\n", 4734 vport_id, ret); 4735 return ret; 4736 } 4737 4738 /* vlan 0 may be added twice when 8021q module is enabled */ 4739 if (!is_kill && !vlan_id && 4740 test_bit(vport_id, hdev->vlan_table[vlan_id])) 4741 return 0; 4742 4743 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { 4744 dev_err(&hdev->pdev->dev, 4745 "Add port vlan failed, vport %d is already in vlan %d\n", 4746 vport_id, vlan_id); 4747 return -EINVAL; 4748 } 4749 4750 if (is_kill && 4751 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { 4752 dev_err(&hdev->pdev->dev, 4753 "Delete port vlan failed, vport %d is not in vlan %d\n", 4754 vport_id, vlan_id); 4755 return -EINVAL; 4756 } 4757 4758 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID) 4759 vport_num++; 4760 4761 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) 4762 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, 4763 is_kill); 4764 4765 return ret; 4766 } 4767 4768 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, 4769 u16 vlan_id, bool is_kill) 4770 { 4771 struct hclge_vport *vport = hclge_get_vport(handle); 4772 struct hclge_dev *hdev = vport->back; 4773 4774 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, 4775 0, is_kill); 4776 } 4777 4778 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, 4779 u16 vlan, u8 qos, __be16 proto) 4780 { 4781 struct hclge_vport *vport = hclge_get_vport(handle); 4782 struct hclge_dev *hdev = vport->back; 4783 4784 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) 4785 return -EINVAL; 4786 if (proto != htons(ETH_P_8021Q)) 4787 return -EPROTONOSUPPORT; 4788 4789 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); 4790 } 4791 4792 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) 4793 { 4794 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; 4795 struct hclge_vport_vtag_tx_cfg_cmd *req; 4796 struct hclge_dev *hdev = vport->back; 4797 struct hclge_desc desc; 4798 int status; 4799 4800 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); 4801 4802 req = (struct hclge_vport_vtag_tx_cfg_cmd 
*)desc.data; 4803 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); 4804 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); 4805 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, 4806 vcfg->accept_tag1 ? 1 : 0); 4807 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, 4808 vcfg->accept_untag1 ? 1 : 0); 4809 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, 4810 vcfg->accept_tag2 ? 1 : 0); 4811 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, 4812 vcfg->accept_untag2 ? 1 : 0); 4813 hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, 4814 vcfg->insert_tag1_en ? 1 : 0); 4815 hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, 4816 vcfg->insert_tag2_en ? 1 : 0); 4817 hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); 4818 4819 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 4820 req->vf_bitmap[req->vf_offset] = 4821 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 4822 4823 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4824 if (status) 4825 dev_err(&hdev->pdev->dev, 4826 "Send port txvlan cfg command fail, ret =%d\n", 4827 status); 4828 4829 return status; 4830 } 4831 4832 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) 4833 { 4834 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; 4835 struct hclge_vport_vtag_rx_cfg_cmd *req; 4836 struct hclge_dev *hdev = vport->back; 4837 struct hclge_desc desc; 4838 int status; 4839 4840 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); 4841 4842 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; 4843 hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, 4844 vcfg->strip_tag1_en ? 1 : 0); 4845 hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, 4846 vcfg->strip_tag2_en ? 1 : 0); 4847 hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, 4848 vcfg->vlan1_vlan_prionly ? 1 : 0); 4849 hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, 4850 vcfg->vlan2_vlan_prionly ? 
1 : 0); 4851 4852 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 4853 req->vf_bitmap[req->vf_offset] = 4854 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 4855 4856 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4857 if (status) 4858 dev_err(&hdev->pdev->dev, 4859 "Send port rxvlan cfg command fail, ret =%d\n", 4860 status); 4861 4862 return status; 4863 } 4864 4865 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) 4866 { 4867 struct hclge_rx_vlan_type_cfg_cmd *rx_req; 4868 struct hclge_tx_vlan_type_cfg_cmd *tx_req; 4869 struct hclge_desc desc; 4870 int status; 4871 4872 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); 4873 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; 4874 rx_req->ot_fst_vlan_type = 4875 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); 4876 rx_req->ot_sec_vlan_type = 4877 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); 4878 rx_req->in_fst_vlan_type = 4879 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); 4880 rx_req->in_sec_vlan_type = 4881 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); 4882 4883 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4884 if (status) { 4885 dev_err(&hdev->pdev->dev, 4886 "Send rxvlan protocol type command fail, ret =%d\n", 4887 status); 4888 return status; 4889 } 4890 4891 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); 4892 4893 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data; 4894 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); 4895 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); 4896 4897 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4898 if (status) 4899 dev_err(&hdev->pdev->dev, 4900 "Send txvlan protocol type command fail, ret =%d\n", 4901 status); 4902 4903 return status; 4904 } 4905 4906 static int hclge_init_vlan_config(struct hclge_dev *hdev) 4907 { 4908 #define HCLGE_DEF_VLAN_TYPE 0x8100 4909 4910 struct hnae3_handle *handle; 4911 struct hclge_vport *vport; 4912 int ret; 4913 int i; 4914 4915 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true); 4916 if (ret) 4917 return ret; 4918 4919 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true); 4920 if (ret) 4921 return ret; 4922 4923 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; 4924 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; 4925 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; 4926 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; 4927 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; 4928 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; 4929 4930 ret = hclge_set_vlan_protocol_type(hdev); 4931 if (ret) 4932 return ret; 4933 4934 for (i = 0; i < hdev->num_alloc_vport; i++) { 4935 vport = &hdev->vport[i]; 4936 vport->txvlan_cfg.accept_tag1 = true; 4937 vport->txvlan_cfg.accept_untag1 = true; 4938 4939 /* accept_tag2 and accept_untag2 are not supported on 4940 * pdev revision(0x20), new revision support them. The 4941 * value of this two fields will not return error when driver 4942 * send command to fireware in revision(0x20). 4943 * This two fields can not configured by user. 
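 * In other words, unconditionally setting both fields to true below is
 * expected to be harmless on either revision: the 0x20 firmware accepts
 * the command and presumably just ignores the two bits, while newer
 * revisions honour them.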
4944 */ 4945 vport->txvlan_cfg.accept_tag2 = true; 4946 vport->txvlan_cfg.accept_untag2 = true; 4947 4948 vport->txvlan_cfg.insert_tag1_en = false; 4949 vport->txvlan_cfg.insert_tag2_en = false; 4950 vport->txvlan_cfg.default_tag1 = 0; 4951 vport->txvlan_cfg.default_tag2 = 0; 4952 4953 ret = hclge_set_vlan_tx_offload_cfg(vport); 4954 if (ret) 4955 return ret; 4956 4957 vport->rxvlan_cfg.strip_tag1_en = false; 4958 vport->rxvlan_cfg.strip_tag2_en = true; 4959 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 4960 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 4961 4962 ret = hclge_set_vlan_rx_offload_cfg(vport); 4963 if (ret) 4964 return ret; 4965 } 4966 4967 handle = &hdev->vport[0].nic; 4968 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); 4969 } 4970 4971 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 4972 { 4973 struct hclge_vport *vport = hclge_get_vport(handle); 4974 4975 vport->rxvlan_cfg.strip_tag1_en = false; 4976 vport->rxvlan_cfg.strip_tag2_en = enable; 4977 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 4978 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 4979 4980 return hclge_set_vlan_rx_offload_cfg(vport); 4981 } 4982 4983 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) 4984 { 4985 struct hclge_config_max_frm_size_cmd *req; 4986 struct hclge_desc desc; 4987 int max_frm_size; 4988 int ret; 4989 4990 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 4991 4992 if (max_frm_size < HCLGE_MAC_MIN_FRAME || 4993 max_frm_size > HCLGE_MAC_MAX_FRAME) 4994 return -EINVAL; 4995 4996 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); 4997 4998 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); 4999 5000 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 5001 req->max_frm_size = cpu_to_le16(max_frm_size); 5002 5003 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5004 if (ret) { 5005 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); 5006 return ret; 5007 } 5008 5009 hdev->mps = max_frm_size; 5010 5011 return 0; 5012 } 5013 5014 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) 5015 { 5016 struct hclge_vport *vport = hclge_get_vport(handle); 5017 struct hclge_dev *hdev = vport->back; 5018 int ret; 5019 5020 ret = hclge_set_mac_mtu(hdev, new_mtu); 5021 if (ret) { 5022 dev_err(&hdev->pdev->dev, 5023 "Change mtu fail, ret =%d\n", ret); 5024 return ret; 5025 } 5026 5027 ret = hclge_buffer_alloc(hdev); 5028 if (ret) 5029 dev_err(&hdev->pdev->dev, 5030 "Allocate buffer fail, ret =%d\n", ret); 5031 5032 return ret; 5033 } 5034 5035 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, 5036 bool enable) 5037 { 5038 struct hclge_reset_tqp_queue_cmd *req; 5039 struct hclge_desc desc; 5040 int ret; 5041 5042 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); 5043 5044 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 5045 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 5046 hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); 5047 5048 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5049 if (ret) { 5050 dev_err(&hdev->pdev->dev, 5051 "Send tqp reset cmd error, status =%d\n", ret); 5052 return ret; 5053 } 5054 5055 return 0; 5056 } 5057 5058 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) 5059 { 5060 struct hclge_reset_tqp_queue_cmd *req; 5061 struct hclge_desc desc; 5062 int ret; 5063 5064 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); 5065 5066 req = (struct hclge_reset_tqp_queue_cmd 
*)desc.data; 5067 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 5068 5069 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5070 if (ret) { 5071 dev_err(&hdev->pdev->dev, 5072 "Get reset status error, status =%d\n", ret); 5073 return ret; 5074 } 5075 5076 return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); 5077 } 5078 5079 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, 5080 u16 queue_id) 5081 { 5082 struct hnae3_queue *queue; 5083 struct hclge_tqp *tqp; 5084 5085 queue = handle->kinfo.tqp[queue_id]; 5086 tqp = container_of(queue, struct hclge_tqp, q); 5087 5088 return tqp->index; 5089 } 5090 5091 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 5092 { 5093 struct hclge_vport *vport = hclge_get_vport(handle); 5094 struct hclge_dev *hdev = vport->back; 5095 int reset_try_times = 0; 5096 int reset_status; 5097 u16 queue_gid; 5098 int ret; 5099 5100 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 5101 return; 5102 5103 queue_gid = hclge_covert_handle_qid_global(handle, queue_id); 5104 5105 ret = hclge_tqp_enable(hdev, queue_id, 0, false); 5106 if (ret) { 5107 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); 5108 return; 5109 } 5110 5111 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 5112 if (ret) { 5113 dev_warn(&hdev->pdev->dev, 5114 "Send reset tqp cmd fail, ret = %d\n", ret); 5115 return; 5116 } 5117 5118 reset_try_times = 0; 5119 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 5120 /* Wait for tqp hw reset */ 5121 msleep(20); 5122 reset_status = hclge_get_reset_status(hdev, queue_gid); 5123 if (reset_status) 5124 break; 5125 } 5126 5127 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 5128 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); 5129 return; 5130 } 5131 5132 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 5133 if (ret) { 5134 dev_warn(&hdev->pdev->dev, 5135 "Deassert the soft reset fail, ret = %d\n", ret); 5136 return; 5137 } 5138 } 5139 5140 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) 5141 { 5142 struct hclge_dev *hdev = vport->back; 5143 int reset_try_times = 0; 5144 int reset_status; 5145 u16 queue_gid; 5146 int ret; 5147 5148 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); 5149 5150 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 5151 if (ret) { 5152 dev_warn(&hdev->pdev->dev, 5153 "Send reset tqp cmd fail, ret = %d\n", ret); 5154 return; 5155 } 5156 5157 reset_try_times = 0; 5158 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 5159 /* Wait for tqp hw reset */ 5160 msleep(20); 5161 reset_status = hclge_get_reset_status(hdev, queue_gid); 5162 if (reset_status) 5163 break; 5164 } 5165 5166 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 5167 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); 5168 return; 5169 } 5170 5171 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 5172 if (ret) 5173 dev_warn(&hdev->pdev->dev, 5174 "Deassert the soft reset fail, ret = %d\n", ret); 5175 } 5176 5177 static u32 hclge_get_fw_version(struct hnae3_handle *handle) 5178 { 5179 struct hclge_vport *vport = hclge_get_vport(handle); 5180 struct hclge_dev *hdev = vport->back; 5181 5182 return hdev->fw_version; 5183 } 5184 5185 static void hclge_get_flowctrl_adv(struct hnae3_handle *handle, 5186 u32 *flowctrl_adv) 5187 { 5188 struct hclge_vport *vport = hclge_get_vport(handle); 5189 struct hclge_dev *hdev = vport->back; 5190 struct phy_device *phydev = hdev->hw.mac.phydev; 5191 5192 if (!phydev) 5193 return; 5194 5195 *flowctrl_adv |= 
(phydev->advertising & ADVERTISED_Pause) | 5196 (phydev->advertising & ADVERTISED_Asym_Pause); 5197 } 5198 5199 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 5200 { 5201 struct phy_device *phydev = hdev->hw.mac.phydev; 5202 5203 if (!phydev) 5204 return; 5205 5206 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); 5207 5208 if (rx_en) 5209 phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; 5210 5211 if (tx_en) 5212 phydev->advertising ^= ADVERTISED_Asym_Pause; 5213 } 5214 5215 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 5216 { 5217 int ret; 5218 5219 if (rx_en && tx_en) 5220 hdev->fc_mode_last_time = HCLGE_FC_FULL; 5221 else if (rx_en && !tx_en) 5222 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; 5223 else if (!rx_en && tx_en) 5224 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; 5225 else 5226 hdev->fc_mode_last_time = HCLGE_FC_NONE; 5227 5228 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) 5229 return 0; 5230 5231 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); 5232 if (ret) { 5233 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", 5234 ret); 5235 return ret; 5236 } 5237 5238 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; 5239 5240 return 0; 5241 } 5242 5243 int hclge_cfg_flowctrl(struct hclge_dev *hdev) 5244 { 5245 struct phy_device *phydev = hdev->hw.mac.phydev; 5246 u16 remote_advertising = 0; 5247 u16 local_advertising = 0; 5248 u32 rx_pause, tx_pause; 5249 u8 flowctl; 5250 5251 if (!phydev->link || !phydev->autoneg) 5252 return 0; 5253 5254 if (phydev->advertising & ADVERTISED_Pause) 5255 local_advertising = ADVERTISE_PAUSE_CAP; 5256 5257 if (phydev->advertising & ADVERTISED_Asym_Pause) 5258 local_advertising |= ADVERTISE_PAUSE_ASYM; 5259 5260 if (phydev->pause) 5261 remote_advertising = LPA_PAUSE_CAP; 5262 5263 if (phydev->asym_pause) 5264 remote_advertising |= LPA_PAUSE_ASYM; 5265 5266 flowctl = mii_resolve_flowctrl_fdx(local_advertising, 5267 remote_advertising); 5268 tx_pause = flowctl & FLOW_CTRL_TX; 5269 rx_pause = flowctl & FLOW_CTRL_RX; 5270 5271 if (phydev->duplex == HCLGE_MAC_HALF) { 5272 tx_pause = 0; 5273 rx_pause = 0; 5274 } 5275 5276 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); 5277 } 5278 5279 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, 5280 u32 *rx_en, u32 *tx_en) 5281 { 5282 struct hclge_vport *vport = hclge_get_vport(handle); 5283 struct hclge_dev *hdev = vport->back; 5284 5285 *auto_neg = hclge_get_autoneg(handle); 5286 5287 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 5288 *rx_en = 0; 5289 *tx_en = 0; 5290 return; 5291 } 5292 5293 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { 5294 *rx_en = 1; 5295 *tx_en = 0; 5296 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { 5297 *tx_en = 1; 5298 *rx_en = 0; 5299 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { 5300 *rx_en = 1; 5301 *tx_en = 1; 5302 } else { 5303 *rx_en = 0; 5304 *tx_en = 0; 5305 } 5306 } 5307 5308 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, 5309 u32 rx_en, u32 tx_en) 5310 { 5311 struct hclge_vport *vport = hclge_get_vport(handle); 5312 struct hclge_dev *hdev = vport->back; 5313 struct phy_device *phydev = hdev->hw.mac.phydev; 5314 u32 fc_autoneg; 5315 5316 fc_autoneg = hclge_get_autoneg(handle); 5317 if (auto_neg != fc_autoneg) { 5318 dev_info(&hdev->pdev->dev, 5319 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); 5320 return -EOPNOTSUPP; 5321 } 5322 5323 if (hdev->tm_info.fc_mode == 
HCLGE_FC_PFC) { 5324 dev_info(&hdev->pdev->dev, 5325 "Priority flow control enabled. Cannot set link flow control.\n"); 5326 return -EOPNOTSUPP; 5327 } 5328 5329 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); 5330 5331 if (!fc_autoneg) 5332 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); 5333 5334 /* Only support flow control negotiation for netdev with 5335 * phy attached for now. 5336 */ 5337 if (!phydev) 5338 return -EOPNOTSUPP; 5339 5340 return phy_start_aneg(phydev); 5341 } 5342 5343 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, 5344 u8 *auto_neg, u32 *speed, u8 *duplex) 5345 { 5346 struct hclge_vport *vport = hclge_get_vport(handle); 5347 struct hclge_dev *hdev = vport->back; 5348 5349 if (speed) 5350 *speed = hdev->hw.mac.speed; 5351 if (duplex) 5352 *duplex = hdev->hw.mac.duplex; 5353 if (auto_neg) 5354 *auto_neg = hdev->hw.mac.autoneg; 5355 } 5356 5357 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) 5358 { 5359 struct hclge_vport *vport = hclge_get_vport(handle); 5360 struct hclge_dev *hdev = vport->back; 5361 5362 if (media_type) 5363 *media_type = hdev->hw.mac.media_type; 5364 } 5365 5366 static void hclge_get_mdix_mode(struct hnae3_handle *handle, 5367 u8 *tp_mdix_ctrl, u8 *tp_mdix) 5368 { 5369 struct hclge_vport *vport = hclge_get_vport(handle); 5370 struct hclge_dev *hdev = vport->back; 5371 struct phy_device *phydev = hdev->hw.mac.phydev; 5372 int mdix_ctrl, mdix, retval, is_resolved; 5373 5374 if (!phydev) { 5375 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 5376 *tp_mdix = ETH_TP_MDI_INVALID; 5377 return; 5378 } 5379 5380 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); 5381 5382 retval = phy_read(phydev, HCLGE_PHY_CSC_REG); 5383 mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, 5384 HCLGE_PHY_MDIX_CTRL_S); 5385 5386 retval = phy_read(phydev, HCLGE_PHY_CSS_REG); 5387 mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); 5388 is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); 5389 5390 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); 5391 5392 switch (mdix_ctrl) { 5393 case 0x0: 5394 *tp_mdix_ctrl = ETH_TP_MDI; 5395 break; 5396 case 0x1: 5397 *tp_mdix_ctrl = ETH_TP_MDI_X; 5398 break; 5399 case 0x3: 5400 *tp_mdix_ctrl = ETH_TP_MDI_AUTO; 5401 break; 5402 default: 5403 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 5404 break; 5405 } 5406 5407 if (!is_resolved) 5408 *tp_mdix = ETH_TP_MDI_INVALID; 5409 else if (mdix) 5410 *tp_mdix = ETH_TP_MDI_X; 5411 else 5412 *tp_mdix = ETH_TP_MDI; 5413 } 5414 5415 static int hclge_init_client_instance(struct hnae3_client *client, 5416 struct hnae3_ae_dev *ae_dev) 5417 { 5418 struct hclge_dev *hdev = ae_dev->priv; 5419 struct hclge_vport *vport; 5420 int i, ret; 5421 5422 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 5423 vport = &hdev->vport[i]; 5424 5425 switch (client->type) { 5426 case HNAE3_CLIENT_KNIC: 5427 5428 hdev->nic_client = client; 5429 vport->nic.client = client; 5430 ret = client->ops->init_instance(&vport->nic); 5431 if (ret) 5432 return ret; 5433 5434 if (hdev->roce_client && 5435 hnae3_dev_roce_supported(hdev)) { 5436 struct hnae3_client *rc = hdev->roce_client; 5437 5438 ret = hclge_init_roce_base_info(vport); 5439 if (ret) 5440 return ret; 5441 5442 ret = rc->ops->init_instance(&vport->roce); 5443 if (ret) 5444 return ret; 5445 } 5446 5447 break; 5448 case HNAE3_CLIENT_UNIC: 5449 hdev->nic_client = client; 5450 vport->nic.client = client; 5451 5452 ret = client->ops->init_instance(&vport->nic); 5453 if (ret) 5454 return ret; 5455 5456 break; 
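/* For a RoCE client the reference is only recorded when the hardware
 * actually supports RoCE, and the instance is brought up only once both
 * the RoCE and the NIC clients have registered, with
 * hclge_init_roce_base_info() run before the instance is created.
 */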
5457 case HNAE3_CLIENT_ROCE: 5458 if (hnae3_dev_roce_supported(hdev)) { 5459 hdev->roce_client = client; 5460 vport->roce.client = client; 5461 } 5462 5463 if (hdev->roce_client && hdev->nic_client) { 5464 ret = hclge_init_roce_base_info(vport); 5465 if (ret) 5466 return ret; 5467 5468 ret = client->ops->init_instance(&vport->roce); 5469 if (ret) 5470 return ret; 5471 } 5472 } 5473 } 5474 5475 return 0; 5476 } 5477 5478 static void hclge_uninit_client_instance(struct hnae3_client *client, 5479 struct hnae3_ae_dev *ae_dev) 5480 { 5481 struct hclge_dev *hdev = ae_dev->priv; 5482 struct hclge_vport *vport; 5483 int i; 5484 5485 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 5486 vport = &hdev->vport[i]; 5487 if (hdev->roce_client) { 5488 hdev->roce_client->ops->uninit_instance(&vport->roce, 5489 0); 5490 hdev->roce_client = NULL; 5491 vport->roce.client = NULL; 5492 } 5493 if (client->type == HNAE3_CLIENT_ROCE) 5494 return; 5495 if (client->ops->uninit_instance) { 5496 client->ops->uninit_instance(&vport->nic, 0); 5497 hdev->nic_client = NULL; 5498 vport->nic.client = NULL; 5499 } 5500 } 5501 } 5502 5503 static int hclge_pci_init(struct hclge_dev *hdev) 5504 { 5505 struct pci_dev *pdev = hdev->pdev; 5506 struct hclge_hw *hw; 5507 int ret; 5508 5509 ret = pci_enable_device(pdev); 5510 if (ret) { 5511 dev_err(&pdev->dev, "failed to enable PCI device\n"); 5512 return ret; 5513 } 5514 5515 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 5516 if (ret) { 5517 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 5518 if (ret) { 5519 dev_err(&pdev->dev, 5520 "can't set consistent PCI DMA"); 5521 goto err_disable_device; 5522 } 5523 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); 5524 } 5525 5526 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); 5527 if (ret) { 5528 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 5529 goto err_disable_device; 5530 } 5531 5532 pci_set_master(pdev); 5533 hw = &hdev->hw; 5534 hw->back = hdev; 5535 hw->io_base = pcim_iomap(pdev, 2, 0); 5536 if (!hw->io_base) { 5537 dev_err(&pdev->dev, "Can't map configuration register space\n"); 5538 ret = -ENOMEM; 5539 goto err_clr_master; 5540 } 5541 5542 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); 5543 5544 return 0; 5545 err_clr_master: 5546 pci_clear_master(pdev); 5547 pci_release_regions(pdev); 5548 err_disable_device: 5549 pci_disable_device(pdev); 5550 5551 return ret; 5552 } 5553 5554 static void hclge_pci_uninit(struct hclge_dev *hdev) 5555 { 5556 struct pci_dev *pdev = hdev->pdev; 5557 5558 pcim_iounmap(pdev, hdev->hw.io_base); 5559 pci_free_irq_vectors(pdev); 5560 pci_clear_master(pdev); 5561 pci_release_mem_regions(pdev); 5562 pci_disable_device(pdev); 5563 } 5564 5565 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) 5566 { 5567 struct pci_dev *pdev = ae_dev->pdev; 5568 struct hclge_dev *hdev; 5569 int ret; 5570 5571 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 5572 if (!hdev) { 5573 ret = -ENOMEM; 5574 goto out; 5575 } 5576 5577 hdev->pdev = pdev; 5578 hdev->ae_dev = ae_dev; 5579 hdev->reset_type = HNAE3_NONE_RESET; 5580 hdev->reset_request = 0; 5581 hdev->reset_pending = 0; 5582 ae_dev->priv = hdev; 5583 5584 ret = hclge_pci_init(hdev); 5585 if (ret) { 5586 dev_err(&pdev->dev, "PCI init failed\n"); 5587 goto out; 5588 } 5589 5590 /* Firmware command queue initialize */ 5591 ret = hclge_cmd_queue_init(hdev); 5592 if (ret) { 5593 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); 5594 goto err_pci_uninit; 5595 } 5596 5597 /* Firmware 
command initialize */ 5598 ret = hclge_cmd_init(hdev); 5599 if (ret) 5600 goto err_cmd_uninit; 5601 5602 ret = hclge_get_cap(hdev); 5603 if (ret) { 5604 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 5605 ret); 5606 goto err_cmd_uninit; 5607 } 5608 5609 ret = hclge_configure(hdev); 5610 if (ret) { 5611 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 5612 goto err_cmd_uninit; 5613 } 5614 5615 ret = hclge_init_msi(hdev); 5616 if (ret) { 5617 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); 5618 goto err_cmd_uninit; 5619 } 5620 5621 ret = hclge_misc_irq_init(hdev); 5622 if (ret) { 5623 dev_err(&pdev->dev, 5624 "Misc IRQ(vector0) init error, ret = %d.\n", 5625 ret); 5626 goto err_msi_uninit; 5627 } 5628 5629 ret = hclge_alloc_tqps(hdev); 5630 if (ret) { 5631 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); 5632 goto err_msi_irq_uninit; 5633 } 5634 5635 ret = hclge_alloc_vport(hdev); 5636 if (ret) { 5637 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); 5638 goto err_msi_irq_uninit; 5639 } 5640 5641 ret = hclge_map_tqp(hdev); 5642 if (ret) { 5643 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 5644 goto err_msi_irq_uninit; 5645 } 5646 5647 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { 5648 ret = hclge_mac_mdio_config(hdev); 5649 if (ret) { 5650 dev_err(&hdev->pdev->dev, 5651 "mdio config fail ret=%d\n", ret); 5652 goto err_msi_irq_uninit; 5653 } 5654 } 5655 5656 ret = hclge_mac_init(hdev); 5657 if (ret) { 5658 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 5659 goto err_mdiobus_unreg; 5660 } 5661 5662 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 5663 if (ret) { 5664 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 5665 goto err_mdiobus_unreg; 5666 } 5667 5668 ret = hclge_init_vlan_config(hdev); 5669 if (ret) { 5670 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 5671 goto err_mdiobus_unreg; 5672 } 5673 5674 ret = hclge_tm_schd_init(hdev); 5675 if (ret) { 5676 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); 5677 goto err_mdiobus_unreg; 5678 } 5679 5680 hclge_rss_init_cfg(hdev); 5681 ret = hclge_rss_init_hw(hdev); 5682 if (ret) { 5683 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 5684 goto err_mdiobus_unreg; 5685 } 5686 5687 ret = init_mgr_tbl(hdev); 5688 if (ret) { 5689 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); 5690 goto err_mdiobus_unreg; 5691 } 5692 5693 hclge_dcb_ops_set(hdev); 5694 5695 timer_setup(&hdev->service_timer, hclge_service_timer, 0); 5696 INIT_WORK(&hdev->service_task, hclge_service_task); 5697 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); 5698 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); 5699 5700 hclge_clear_all_event_cause(hdev); 5701 5702 /* Enable MISC vector(vector0) */ 5703 hclge_enable_vector(&hdev->misc_vector, true); 5704 5705 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); 5706 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5707 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 5708 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 5709 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 5710 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 5711 5712 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); 5713 return 0; 5714 5715 err_mdiobus_unreg: 5716 if (hdev->hw.mac.phydev) 5717 mdiobus_unregister(hdev->hw.mac.mdio_bus); 5718 err_msi_irq_uninit: 5719 hclge_misc_irq_uninit(hdev); 5720 err_msi_uninit: 5721 pci_free_irq_vectors(pdev); 5722 
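/* The error labels below unwind the init sequence in reverse order: each
 * failure above jumps to the label that tears down everything that had
 * already been set up, ending with the PCI resources.
 */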
err_cmd_uninit: 5723 hclge_destroy_cmd_queue(&hdev->hw); 5724 err_pci_uninit: 5725 pcim_iounmap(pdev, hdev->hw.io_base); 5726 pci_clear_master(pdev); 5727 pci_release_regions(pdev); 5728 pci_disable_device(pdev); 5729 out: 5730 return ret; 5731 } 5732 5733 static void hclge_stats_clear(struct hclge_dev *hdev) 5734 { 5735 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); 5736 } 5737 5738 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) 5739 { 5740 struct hclge_dev *hdev = ae_dev->priv; 5741 struct pci_dev *pdev = ae_dev->pdev; 5742 int ret; 5743 5744 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5745 5746 hclge_stats_clear(hdev); 5747 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); 5748 5749 ret = hclge_cmd_init(hdev); 5750 if (ret) { 5751 dev_err(&pdev->dev, "Cmd queue init failed\n"); 5752 return ret; 5753 } 5754 5755 ret = hclge_get_cap(hdev); 5756 if (ret) { 5757 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 5758 ret); 5759 return ret; 5760 } 5761 5762 ret = hclge_configure(hdev); 5763 if (ret) { 5764 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 5765 return ret; 5766 } 5767 5768 ret = hclge_map_tqp(hdev); 5769 if (ret) { 5770 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 5771 return ret; 5772 } 5773 5774 ret = hclge_mac_init(hdev); 5775 if (ret) { 5776 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 5777 return ret; 5778 } 5779 5780 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 5781 if (ret) { 5782 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 5783 return ret; 5784 } 5785 5786 ret = hclge_init_vlan_config(hdev); 5787 if (ret) { 5788 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 5789 return ret; 5790 } 5791 5792 ret = hclge_tm_init_hw(hdev); 5793 if (ret) { 5794 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); 5795 return ret; 5796 } 5797 5798 ret = hclge_rss_init_hw(hdev); 5799 if (ret) { 5800 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 5801 return ret; 5802 } 5803 5804 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", 5805 HCLGE_DRIVER_NAME); 5806 5807 return 0; 5808 } 5809 5810 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 5811 { 5812 struct hclge_dev *hdev = ae_dev->priv; 5813 struct hclge_mac *mac = &hdev->hw.mac; 5814 5815 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5816 5817 if (hdev->service_timer.function) 5818 del_timer_sync(&hdev->service_timer); 5819 if (hdev->service_task.func) 5820 cancel_work_sync(&hdev->service_task); 5821 if (hdev->rst_service_task.func) 5822 cancel_work_sync(&hdev->rst_service_task); 5823 if (hdev->mbx_service_task.func) 5824 cancel_work_sync(&hdev->mbx_service_task); 5825 5826 if (mac->phydev) 5827 mdiobus_unregister(mac->mdio_bus); 5828 5829 /* Disable MISC vector(vector0) */ 5830 hclge_enable_vector(&hdev->misc_vector, false); 5831 synchronize_irq(hdev->misc_vector.vector_irq); 5832 5833 hclge_destroy_cmd_queue(&hdev->hw); 5834 hclge_misc_irq_uninit(hdev); 5835 hclge_pci_uninit(hdev); 5836 ae_dev->priv = NULL; 5837 } 5838 5839 static u32 hclge_get_max_channels(struct hnae3_handle *handle) 5840 { 5841 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 5842 struct hclge_vport *vport = hclge_get_vport(handle); 5843 struct hclge_dev *hdev = vport->back; 5844 5845 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); 5846 } 5847 5848 static void hclge_get_channels(struct hnae3_handle *handle, 5849 struct ethtool_channels *ch) 5850 { 5851 struct hclge_vport *vport = 
hclge_get_vport(handle); 5852 5853 ch->max_combined = hclge_get_max_channels(handle); 5854 ch->other_count = 1; 5855 ch->max_other = 1; 5856 ch->combined_count = vport->alloc_tqps; 5857 } 5858 5859 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, 5860 u16 *free_tqps, u16 *max_rss_size) 5861 { 5862 struct hclge_vport *vport = hclge_get_vport(handle); 5863 struct hclge_dev *hdev = vport->back; 5864 u16 temp_tqps = 0; 5865 int i; 5866 5867 for (i = 0; i < hdev->num_tqps; i++) { 5868 if (!hdev->htqp[i].alloced) 5869 temp_tqps++; 5870 } 5871 *free_tqps = temp_tqps; 5872 *max_rss_size = hdev->rss_size_max; 5873 } 5874 5875 static void hclge_release_tqp(struct hclge_vport *vport) 5876 { 5877 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 5878 struct hclge_dev *hdev = vport->back; 5879 int i; 5880 5881 for (i = 0; i < kinfo->num_tqps; i++) { 5882 struct hclge_tqp *tqp = 5883 container_of(kinfo->tqp[i], struct hclge_tqp, q); 5884 5885 tqp->q.handle = NULL; 5886 tqp->q.tqp_index = 0; 5887 tqp->alloced = false; 5888 } 5889 5890 devm_kfree(&hdev->pdev->dev, kinfo->tqp); 5891 kinfo->tqp = NULL; 5892 } 5893 5894 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) 5895 { 5896 struct hclge_vport *vport = hclge_get_vport(handle); 5897 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 5898 struct hclge_dev *hdev = vport->back; 5899 int cur_rss_size = kinfo->rss_size; 5900 int cur_tqps = kinfo->num_tqps; 5901 u16 tc_offset[HCLGE_MAX_TC_NUM]; 5902 u16 tc_valid[HCLGE_MAX_TC_NUM]; 5903 u16 tc_size[HCLGE_MAX_TC_NUM]; 5904 u16 roundup_size; 5905 u32 *rss_indir; 5906 int ret, i; 5907 5908 hclge_release_tqp(vport); 5909 5910 ret = hclge_knic_setup(vport, new_tqps_num); 5911 if (ret) { 5912 dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); 5913 return ret; 5914 } 5915 5916 ret = hclge_map_tqp_to_vport(hdev, vport); 5917 if (ret) { 5918 dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); 5919 return ret; 5920 } 5921 5922 ret = hclge_tm_schd_init(hdev); 5923 if (ret) { 5924 dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); 5925 return ret; 5926 } 5927 5928 roundup_size = roundup_pow_of_two(kinfo->rss_size); 5929 roundup_size = ilog2(roundup_size); 5930 /* Set the RSS TC mode according to the new RSS size */ 5931 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 5932 tc_valid[i] = 0; 5933 5934 if (!(hdev->hw_tc_map & BIT(i))) 5935 continue; 5936 5937 tc_valid[i] = 1; 5938 tc_size[i] = roundup_size; 5939 tc_offset[i] = kinfo->rss_size * i; 5940 } 5941 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 5942 if (ret) 5943 return ret; 5944 5945 /* Reinitializes the rss indirect table according to the new RSS size */ 5946 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); 5947 if (!rss_indir) 5948 return -ENOMEM; 5949 5950 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 5951 rss_indir[i] = i % kinfo->rss_size; 5952 5953 ret = hclge_set_rss(handle, rss_indir, NULL, 0); 5954 if (ret) 5955 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 5956 ret); 5957 5958 kfree(rss_indir); 5959 5960 if (!ret) 5961 dev_info(&hdev->pdev->dev, 5962 "Channels changed, rss_size from %d to %d, tqps from %d to %d", 5963 cur_rss_size, kinfo->rss_size, 5964 cur_tqps, kinfo->rss_size * kinfo->num_tc); 5965 5966 return ret; 5967 } 5968 5969 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, 5970 u32 *regs_num_64_bit) 5971 { 5972 struct hclge_desc desc; 5973 u32 total_num; 5974 int ret; 
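/* Ask the firmware how many 32-bit and how many 64-bit registers can be
 * dumped; hclge_get_regs_len() and hclge_get_regs() below use the two
 * counts to size and fill the ethtool register snapshot, i.e.
 * len = regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64).
 */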
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}

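/* Fill the "ethtool -d" buffer: the 32-bit registers come first, with
 * the 64-bit registers appended directly after them, and *version is
 * set to the running firmware version. The caller is expected to have
 * sized the buffer with hclge_get_regs_len() above, i.e.
 * regs_num_32_bit * 4 + regs_num_64_bit * 8 bytes.
 */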
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	data = (u32 *)data + regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, data);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
		       HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static void hclge_get_port_type(struct hnae3_handle *handle,
				u8 *port_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 media_type = hdev->hw.mac.media_type;

	switch (media_type) {
	case HNAE3_MEDIA_TYPE_FIBER:
		*port_type = PORT_FIBRE;
		break;
	case HNAE3_MEDIA_TYPE_COPPER:
		*port_type = PORT_TP;
		break;
	case HNAE3_MEDIA_TYPE_UNKNOWN:
	default:
		*port_type = PORT_OTHER;
		break;
	}
}

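/* hnae3_ae_ops implementation for this PF driver. The hns3 client layer
 * calls into hclge through this table, which is registered below via
 * ae_algo for the PCI device IDs listed in ae_algo_pci_tbl.
 */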
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.update_mta_status = hclge_update_mta_status,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_flowctrl_adv = hclge_get_flowctrl_adv,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.get_port_type = hclge_get_port_type,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.name = HCLGE_NAME,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);