/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))

static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac Loopback test",
	"Serdes Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)}, 98 {"ssu_rx_in_num", 99 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)}, 100 {"ssu_rx_out_num", 101 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)} 102 }; 103 104 static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = { 105 {"igu_rx_err_pkt", 106 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)}, 107 {"igu_rx_no_eof_pkt", 108 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)}, 109 {"igu_rx_no_sof_pkt", 110 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)}, 111 {"egu_tx_1588_pkt", 112 HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)}, 113 {"ssu_full_drop_num", 114 HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)}, 115 {"ssu_part_drop_num", 116 HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)}, 117 {"ppp_key_drop_num", 118 HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)}, 119 {"ppp_rlt_drop_num", 120 HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)}, 121 {"ssu_key_drop_num", 122 HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)}, 123 {"pkt_curr_buf_cnt", 124 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)}, 125 {"qcn_fb_rcv_cnt", 126 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)}, 127 {"qcn_fb_drop_cnt", 128 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)}, 129 {"qcn_fb_invaild_cnt", 130 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)}, 131 {"rx_packet_tc0_in_cnt", 132 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)}, 133 {"rx_packet_tc1_in_cnt", 134 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)}, 135 {"rx_packet_tc2_in_cnt", 136 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)}, 137 {"rx_packet_tc3_in_cnt", 138 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)}, 139 {"rx_packet_tc4_in_cnt", 140 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)}, 141 {"rx_packet_tc5_in_cnt", 142 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)}, 143 {"rx_packet_tc6_in_cnt", 144 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)}, 145 {"rx_packet_tc7_in_cnt", 146 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)}, 147 {"rx_packet_tc0_out_cnt", 148 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)}, 149 {"rx_packet_tc1_out_cnt", 150 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)}, 151 {"rx_packet_tc2_out_cnt", 152 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)}, 153 {"rx_packet_tc3_out_cnt", 154 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)}, 155 {"rx_packet_tc4_out_cnt", 156 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)}, 157 {"rx_packet_tc5_out_cnt", 158 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)}, 159 {"rx_packet_tc6_out_cnt", 160 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)}, 161 {"rx_packet_tc7_out_cnt", 162 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)}, 163 {"tx_packet_tc0_in_cnt", 164 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)}, 165 {"tx_packet_tc1_in_cnt", 166 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)}, 167 {"tx_packet_tc2_in_cnt", 168 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)}, 169 {"tx_packet_tc3_in_cnt", 170 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)}, 171 {"tx_packet_tc4_in_cnt", 172 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)}, 173 {"tx_packet_tc5_in_cnt", 174 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)}, 175 {"tx_packet_tc6_in_cnt", 176 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)}, 177 {"tx_packet_tc7_in_cnt", 178 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)}, 179 {"tx_packet_tc0_out_cnt", 180 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)}, 181 {"tx_packet_tc1_out_cnt", 182 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)}, 183 {"tx_packet_tc2_out_cnt", 184 
HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)}, 185 {"tx_packet_tc3_out_cnt", 186 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)}, 187 {"tx_packet_tc4_out_cnt", 188 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)}, 189 {"tx_packet_tc5_out_cnt", 190 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)}, 191 {"tx_packet_tc6_out_cnt", 192 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)}, 193 {"tx_packet_tc7_out_cnt", 194 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)}, 195 {"pkt_curr_buf_tc0_cnt", 196 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)}, 197 {"pkt_curr_buf_tc1_cnt", 198 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)}, 199 {"pkt_curr_buf_tc2_cnt", 200 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)}, 201 {"pkt_curr_buf_tc3_cnt", 202 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)}, 203 {"pkt_curr_buf_tc4_cnt", 204 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)}, 205 {"pkt_curr_buf_tc5_cnt", 206 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)}, 207 {"pkt_curr_buf_tc6_cnt", 208 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)}, 209 {"pkt_curr_buf_tc7_cnt", 210 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)}, 211 {"mb_uncopy_num", 212 HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)}, 213 {"lo_pri_unicast_rlt_drop_num", 214 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)}, 215 {"hi_pri_multicast_rlt_drop_num", 216 HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)}, 217 {"lo_pri_multicast_rlt_drop_num", 218 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)}, 219 {"rx_oq_drop_pkt_cnt", 220 HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)}, 221 {"tx_oq_drop_pkt_cnt", 222 HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)}, 223 {"nic_l2_err_drop_pkt_cnt", 224 HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)}, 225 {"roc_l2_err_drop_pkt_cnt", 226 HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)} 227 }; 228 229 static const struct hclge_comm_stats_str g_mac_stats_string[] = { 230 {"mac_tx_mac_pause_num", 231 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, 232 {"mac_rx_mac_pause_num", 233 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, 234 {"mac_tx_pfc_pri0_pkt_num", 235 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, 236 {"mac_tx_pfc_pri1_pkt_num", 237 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, 238 {"mac_tx_pfc_pri2_pkt_num", 239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, 240 {"mac_tx_pfc_pri3_pkt_num", 241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, 242 {"mac_tx_pfc_pri4_pkt_num", 243 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, 244 {"mac_tx_pfc_pri5_pkt_num", 245 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, 246 {"mac_tx_pfc_pri6_pkt_num", 247 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, 248 {"mac_tx_pfc_pri7_pkt_num", 249 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, 250 {"mac_rx_pfc_pri0_pkt_num", 251 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, 252 {"mac_rx_pfc_pri1_pkt_num", 253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, 254 {"mac_rx_pfc_pri2_pkt_num", 255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, 256 {"mac_rx_pfc_pri3_pkt_num", 257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, 258 {"mac_rx_pfc_pri4_pkt_num", 259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, 260 {"mac_rx_pfc_pri5_pkt_num", 261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, 262 {"mac_rx_pfc_pri6_pkt_num", 263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, 264 {"mac_rx_pfc_pri7_pkt_num", 265 
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, 266 {"mac_tx_total_pkt_num", 267 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, 268 {"mac_tx_total_oct_num", 269 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, 270 {"mac_tx_good_pkt_num", 271 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, 272 {"mac_tx_bad_pkt_num", 273 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, 274 {"mac_tx_good_oct_num", 275 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, 276 {"mac_tx_bad_oct_num", 277 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, 278 {"mac_tx_uni_pkt_num", 279 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, 280 {"mac_tx_multi_pkt_num", 281 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, 282 {"mac_tx_broad_pkt_num", 283 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, 284 {"mac_tx_undersize_pkt_num", 285 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, 286 {"mac_tx_oversize_pkt_num", 287 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, 288 {"mac_tx_64_oct_pkt_num", 289 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, 290 {"mac_tx_65_127_oct_pkt_num", 291 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, 292 {"mac_tx_128_255_oct_pkt_num", 293 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, 294 {"mac_tx_256_511_oct_pkt_num", 295 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, 296 {"mac_tx_512_1023_oct_pkt_num", 297 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, 298 {"mac_tx_1024_1518_oct_pkt_num", 299 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, 300 {"mac_tx_1519_2047_oct_pkt_num", 301 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, 302 {"mac_tx_2048_4095_oct_pkt_num", 303 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, 304 {"mac_tx_4096_8191_oct_pkt_num", 305 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, 306 {"mac_tx_8192_9216_oct_pkt_num", 307 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, 308 {"mac_tx_9217_12287_oct_pkt_num", 309 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, 310 {"mac_tx_12288_16383_oct_pkt_num", 311 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, 312 {"mac_tx_1519_max_good_pkt_num", 313 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, 314 {"mac_tx_1519_max_bad_pkt_num", 315 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, 316 {"mac_rx_total_pkt_num", 317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, 318 {"mac_rx_total_oct_num", 319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, 320 {"mac_rx_good_pkt_num", 321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, 322 {"mac_rx_bad_pkt_num", 323 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, 324 {"mac_rx_good_oct_num", 325 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, 326 {"mac_rx_bad_oct_num", 327 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, 328 {"mac_rx_uni_pkt_num", 329 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, 330 {"mac_rx_multi_pkt_num", 331 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, 332 {"mac_rx_broad_pkt_num", 333 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, 334 {"mac_rx_undersize_pkt_num", 335 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, 336 {"mac_rx_oversize_pkt_num", 337 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, 338 {"mac_rx_64_oct_pkt_num", 339 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, 340 {"mac_rx_65_127_oct_pkt_num", 341 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, 342 {"mac_rx_128_255_oct_pkt_num", 343 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, 344 {"mac_rx_256_511_oct_pkt_num", 345 
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, 346 {"mac_rx_512_1023_oct_pkt_num", 347 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, 348 {"mac_rx_1024_1518_oct_pkt_num", 349 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, 350 {"mac_rx_1519_2047_oct_pkt_num", 351 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, 352 {"mac_rx_2048_4095_oct_pkt_num", 353 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, 354 {"mac_rx_4096_8191_oct_pkt_num", 355 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, 356 {"mac_rx_8192_9216_oct_pkt_num", 357 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, 358 {"mac_rx_9217_12287_oct_pkt_num", 359 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, 360 {"mac_rx_12288_16383_oct_pkt_num", 361 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, 362 {"mac_rx_1519_max_good_pkt_num", 363 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, 364 {"mac_rx_1519_max_bad_pkt_num", 365 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, 366 367 {"mac_tx_fragment_pkt_num", 368 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, 369 {"mac_tx_undermin_pkt_num", 370 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, 371 {"mac_tx_jabber_pkt_num", 372 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, 373 {"mac_tx_err_all_pkt_num", 374 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, 375 {"mac_tx_from_app_good_pkt_num", 376 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, 377 {"mac_tx_from_app_bad_pkt_num", 378 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, 379 {"mac_rx_fragment_pkt_num", 380 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, 381 {"mac_rx_undermin_pkt_num", 382 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, 383 {"mac_rx_jabber_pkt_num", 384 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, 385 {"mac_rx_fcs_err_pkt_num", 386 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, 387 {"mac_rx_send_app_good_pkt_num", 388 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, 389 {"mac_rx_send_app_bad_pkt_num", 390 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} 391 }; 392 393 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { 394 { 395 .flags = HCLGE_MAC_MGR_MASK_VLAN_B, 396 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP), 397 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)), 398 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)), 399 .i_port_bitmap = 0x1, 400 }, 401 }; 402 403 static int hclge_64_bit_update_stats(struct hclge_dev *hdev) 404 { 405 #define HCLGE_64_BIT_CMD_NUM 5 406 #define HCLGE_64_BIT_RTN_DATANUM 4 407 u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats); 408 struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM]; 409 __le64 *desc_data; 410 int i, k, n; 411 int ret; 412 413 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true); 414 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM); 415 if (ret) { 416 dev_err(&hdev->pdev->dev, 417 "Get 64 bit pkt stats fail, status = %d.\n", ret); 418 return ret; 419 } 420 421 for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) { 422 if (unlikely(i == 0)) { 423 desc_data = (__le64 *)(&desc[i].data[0]); 424 n = HCLGE_64_BIT_RTN_DATANUM - 1; 425 } else { 426 desc_data = (__le64 *)(&desc[i]); 427 n = HCLGE_64_BIT_RTN_DATANUM; 428 } 429 for (k = 0; k < n; k++) { 430 *data++ += le64_to_cpu(*desc_data); 431 desc_data++; 432 } 433 } 434 435 return 0; 436 } 437 438 static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats) 439 { 440 
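	/* The pkt_curr_buf* fields appear to be current-occupancy gauges rather
	 * than accumulating counters, so they are zeroed here and effectively
	 * replaced when hclge_32_bit_update_stats() adds in the values it has
	 * just read, instead of being summed across successive reads.
	 */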
stats->pkt_curr_buf_cnt = 0; 441 stats->pkt_curr_buf_tc0_cnt = 0; 442 stats->pkt_curr_buf_tc1_cnt = 0; 443 stats->pkt_curr_buf_tc2_cnt = 0; 444 stats->pkt_curr_buf_tc3_cnt = 0; 445 stats->pkt_curr_buf_tc4_cnt = 0; 446 stats->pkt_curr_buf_tc5_cnt = 0; 447 stats->pkt_curr_buf_tc6_cnt = 0; 448 stats->pkt_curr_buf_tc7_cnt = 0; 449 } 450 451 static int hclge_32_bit_update_stats(struct hclge_dev *hdev) 452 { 453 #define HCLGE_32_BIT_CMD_NUM 8 454 #define HCLGE_32_BIT_RTN_DATANUM 8 455 456 struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM]; 457 struct hclge_32_bit_stats *all_32_bit_stats; 458 __le32 *desc_data; 459 int i, k, n; 460 u64 *data; 461 int ret; 462 463 all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats; 464 data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt); 465 466 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true); 467 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM); 468 if (ret) { 469 dev_err(&hdev->pdev->dev, 470 "Get 32 bit pkt stats fail, status = %d.\n", ret); 471 472 return ret; 473 } 474 475 hclge_reset_partial_32bit_counter(all_32_bit_stats); 476 for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) { 477 if (unlikely(i == 0)) { 478 __le16 *desc_data_16bit; 479 480 all_32_bit_stats->igu_rx_err_pkt += 481 le32_to_cpu(desc[i].data[0]); 482 483 desc_data_16bit = (__le16 *)&desc[i].data[1]; 484 all_32_bit_stats->igu_rx_no_eof_pkt += 485 le16_to_cpu(*desc_data_16bit); 486 487 desc_data_16bit++; 488 all_32_bit_stats->igu_rx_no_sof_pkt += 489 le16_to_cpu(*desc_data_16bit); 490 491 desc_data = &desc[i].data[2]; 492 n = HCLGE_32_BIT_RTN_DATANUM - 4; 493 } else { 494 desc_data = (__le32 *)&desc[i]; 495 n = HCLGE_32_BIT_RTN_DATANUM; 496 } 497 for (k = 0; k < n; k++) { 498 *data++ += le32_to_cpu(*desc_data); 499 desc_data++; 500 } 501 } 502 503 return 0; 504 } 505 506 static int hclge_mac_update_stats(struct hclge_dev *hdev) 507 { 508 #define HCLGE_MAC_CMD_NUM 21 509 #define HCLGE_RTN_DATA_NUM 4 510 511 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); 512 struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; 513 __le64 *desc_data; 514 int i, k, n; 515 int ret; 516 517 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); 518 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); 519 if (ret) { 520 dev_err(&hdev->pdev->dev, 521 "Get MAC pkt stats fail, status = %d.\n", ret); 522 523 return ret; 524 } 525 526 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { 527 if (unlikely(i == 0)) { 528 desc_data = (__le64 *)(&desc[i].data[0]); 529 n = HCLGE_RTN_DATA_NUM - 2; 530 } else { 531 desc_data = (__le64 *)(&desc[i]); 532 n = HCLGE_RTN_DATA_NUM; 533 } 534 for (k = 0; k < n; k++) { 535 *data++ += le64_to_cpu(*desc_data); 536 desc_data++; 537 } 538 } 539 540 return 0; 541 } 542 543 static int hclge_tqps_update_stats(struct hnae3_handle *handle) 544 { 545 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 546 struct hclge_vport *vport = hclge_get_vport(handle); 547 struct hclge_dev *hdev = vport->back; 548 struct hnae3_queue *queue; 549 struct hclge_desc desc[1]; 550 struct hclge_tqp *tqp; 551 int ret, i; 552 553 for (i = 0; i < kinfo->num_tqps; i++) { 554 queue = handle->kinfo.tqp[i]; 555 tqp = container_of(queue, struct hclge_tqp, q); 556 /* command : HCLGE_OPC_QUERY_IGU_STAT */ 557 hclge_cmd_setup_basic_desc(&desc[0], 558 HCLGE_OPC_QUERY_RX_STATUS, 559 true); 560 561 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); 562 ret = hclge_cmd_send(&hdev->hw, desc, 1); 563 if (ret) { 564 dev_err(&hdev->pdev->dev, 565 "Query tqp stat fail, status = %d,queue = %d\n", 566 ret, i); 567 return 
ret; 568 } 569 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 570 le32_to_cpu(desc[0].data[1]); 571 } 572 573 for (i = 0; i < kinfo->num_tqps; i++) { 574 queue = handle->kinfo.tqp[i]; 575 tqp = container_of(queue, struct hclge_tqp, q); 576 /* command : HCLGE_OPC_QUERY_IGU_STAT */ 577 hclge_cmd_setup_basic_desc(&desc[0], 578 HCLGE_OPC_QUERY_TX_STATUS, 579 true); 580 581 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); 582 ret = hclge_cmd_send(&hdev->hw, desc, 1); 583 if (ret) { 584 dev_err(&hdev->pdev->dev, 585 "Query tqp stat fail, status = %d,queue = %d\n", 586 ret, i); 587 return ret; 588 } 589 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 590 le32_to_cpu(desc[0].data[1]); 591 } 592 593 return 0; 594 } 595 596 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 597 { 598 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 599 struct hclge_tqp *tqp; 600 u64 *buff = data; 601 int i; 602 603 for (i = 0; i < kinfo->num_tqps; i++) { 604 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); 605 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 606 } 607 608 for (i = 0; i < kinfo->num_tqps; i++) { 609 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); 610 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 611 } 612 613 return buff; 614 } 615 616 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset) 617 { 618 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 619 620 return kinfo->num_tqps * (2); 621 } 622 623 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 624 { 625 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 626 u8 *buff = data; 627 int i = 0; 628 629 for (i = 0; i < kinfo->num_tqps; i++) { 630 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], 631 struct hclge_tqp, q); 632 snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", 633 tqp->index); 634 buff = buff + ETH_GSTRING_LEN; 635 } 636 637 for (i = 0; i < kinfo->num_tqps; i++) { 638 struct hclge_tqp *tqp = container_of(kinfo->tqp[i], 639 struct hclge_tqp, q); 640 snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", 641 tqp->index); 642 buff = buff + ETH_GSTRING_LEN; 643 } 644 645 return buff; 646 } 647 648 static u64 *hclge_comm_get_stats(void *comm_stats, 649 const struct hclge_comm_stats_str strs[], 650 int size, u64 *data) 651 { 652 u64 *buf = data; 653 u32 i; 654 655 for (i = 0; i < size; i++) 656 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset); 657 658 return buf + size; 659 } 660 661 static u8 *hclge_comm_get_strings(u32 stringset, 662 const struct hclge_comm_stats_str strs[], 663 int size, u8 *data) 664 { 665 char *buff = (char *)data; 666 u32 i; 667 668 if (stringset != ETH_SS_STATS) 669 return buff; 670 671 for (i = 0; i < size; i++) { 672 snprintf(buff, ETH_GSTRING_LEN, 673 strs[i].desc); 674 buff = buff + ETH_GSTRING_LEN; 675 } 676 677 return (u8 *)buff; 678 } 679 680 static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, 681 struct net_device_stats *net_stats) 682 { 683 net_stats->tx_dropped = 0; 684 net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num; 685 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num; 686 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num; 687 688 net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num; 689 net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; 690 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt; 691 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt; 692 
net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; 693 694 net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; 695 net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num; 696 697 net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; 698 net_stats->rx_length_errors = 699 hw_stats->mac_stats.mac_rx_undersize_pkt_num; 700 net_stats->rx_length_errors += 701 hw_stats->mac_stats.mac_rx_oversize_pkt_num; 702 net_stats->rx_over_errors = 703 hw_stats->mac_stats.mac_rx_oversize_pkt_num; 704 } 705 706 static void hclge_update_stats_for_all(struct hclge_dev *hdev) 707 { 708 struct hnae3_handle *handle; 709 int status; 710 711 handle = &hdev->vport[0].nic; 712 if (handle->client) { 713 status = hclge_tqps_update_stats(handle); 714 if (status) { 715 dev_err(&hdev->pdev->dev, 716 "Update TQPS stats fail, status = %d.\n", 717 status); 718 } 719 } 720 721 status = hclge_mac_update_stats(hdev); 722 if (status) 723 dev_err(&hdev->pdev->dev, 724 "Update MAC stats fail, status = %d.\n", status); 725 726 status = hclge_32_bit_update_stats(hdev); 727 if (status) 728 dev_err(&hdev->pdev->dev, 729 "Update 32 bit stats fail, status = %d.\n", 730 status); 731 732 hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); 733 } 734 735 static void hclge_update_stats(struct hnae3_handle *handle, 736 struct net_device_stats *net_stats) 737 { 738 struct hclge_vport *vport = hclge_get_vport(handle); 739 struct hclge_dev *hdev = vport->back; 740 struct hclge_hw_stats *hw_stats = &hdev->hw_stats; 741 int status; 742 743 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) 744 return; 745 746 status = hclge_mac_update_stats(hdev); 747 if (status) 748 dev_err(&hdev->pdev->dev, 749 "Update MAC stats fail, status = %d.\n", 750 status); 751 752 status = hclge_32_bit_update_stats(hdev); 753 if (status) 754 dev_err(&hdev->pdev->dev, 755 "Update 32 bit stats fail, status = %d.\n", 756 status); 757 758 status = hclge_64_bit_update_stats(hdev); 759 if (status) 760 dev_err(&hdev->pdev->dev, 761 "Update 64 bit stats fail, status = %d.\n", 762 status); 763 764 status = hclge_tqps_update_stats(handle); 765 if (status) 766 dev_err(&hdev->pdev->dev, 767 "Update TQPS stats fail, status = %d.\n", 768 status); 769 770 hclge_update_netstat(hw_stats, net_stats); 771 772 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); 773 } 774 775 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) 776 { 777 #define HCLGE_LOOPBACK_TEST_FLAGS 0x7 778 779 struct hclge_vport *vport = hclge_get_vport(handle); 780 struct hclge_dev *hdev = vport->back; 781 int count = 0; 782 783 /* Loopback test support rules: 784 * mac: only GE mode support 785 * serdes: all mac mode will support include GE/XGE/LGE/CGE 786 * phy: only support when phy device exist on board 787 */ 788 if (stringset == ETH_SS_TEST) { 789 /* clear loopback bit flags at first */ 790 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); 791 if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || 792 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || 793 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { 794 count += 1; 795 handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK; 796 } else { 797 count = -EOPNOTSUPP; 798 } 799 } else if (stringset == ETH_SS_STATS) { 800 count = ARRAY_SIZE(g_mac_stats_string) + 801 ARRAY_SIZE(g_all_32bit_stats_string) + 802 ARRAY_SIZE(g_all_64bit_stats_string) + 803 hclge_tqps_get_sset_count(handle, stringset); 804 } 805 806 return count; 807 } 808 809 static void 
hclge_get_strings(struct hnae3_handle *handle, 810 u32 stringset, 811 u8 *data) 812 { 813 u8 *p = (char *)data; 814 int size; 815 816 if (stringset == ETH_SS_STATS) { 817 size = ARRAY_SIZE(g_mac_stats_string); 818 p = hclge_comm_get_strings(stringset, 819 g_mac_stats_string, 820 size, 821 p); 822 size = ARRAY_SIZE(g_all_32bit_stats_string); 823 p = hclge_comm_get_strings(stringset, 824 g_all_32bit_stats_string, 825 size, 826 p); 827 size = ARRAY_SIZE(g_all_64bit_stats_string); 828 p = hclge_comm_get_strings(stringset, 829 g_all_64bit_stats_string, 830 size, 831 p); 832 p = hclge_tqps_get_strings(handle, p); 833 } else if (stringset == ETH_SS_TEST) { 834 if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) { 835 memcpy(p, 836 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC], 837 ETH_GSTRING_LEN); 838 p += ETH_GSTRING_LEN; 839 } 840 if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) { 841 memcpy(p, 842 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES], 843 ETH_GSTRING_LEN); 844 p += ETH_GSTRING_LEN; 845 } 846 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { 847 memcpy(p, 848 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY], 849 ETH_GSTRING_LEN); 850 p += ETH_GSTRING_LEN; 851 } 852 } 853 } 854 855 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) 856 { 857 struct hclge_vport *vport = hclge_get_vport(handle); 858 struct hclge_dev *hdev = vport->back; 859 u64 *p; 860 861 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, 862 g_mac_stats_string, 863 ARRAY_SIZE(g_mac_stats_string), 864 data); 865 p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats, 866 g_all_32bit_stats_string, 867 ARRAY_SIZE(g_all_32bit_stats_string), 868 p); 869 p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats, 870 g_all_64bit_stats_string, 871 ARRAY_SIZE(g_all_64bit_stats_string), 872 p); 873 p = hclge_tqps_get_stats(handle, p); 874 } 875 876 static int hclge_parse_func_status(struct hclge_dev *hdev, 877 struct hclge_func_status_cmd *status) 878 { 879 if (!(status->pf_state & HCLGE_PF_STATE_DONE)) 880 return -EINVAL; 881 882 /* Set the pf to main pf */ 883 if (status->pf_state & HCLGE_PF_STATE_MAIN) 884 hdev->flag |= HCLGE_FLAG_MAIN; 885 else 886 hdev->flag &= ~HCLGE_FLAG_MAIN; 887 888 return 0; 889 } 890 891 static int hclge_query_function_status(struct hclge_dev *hdev) 892 { 893 struct hclge_func_status_cmd *req; 894 struct hclge_desc desc; 895 int timeout = 0; 896 int ret; 897 898 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); 899 req = (struct hclge_func_status_cmd *)desc.data; 900 901 do { 902 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 903 if (ret) { 904 dev_err(&hdev->pdev->dev, 905 "query function status failed %d.\n", 906 ret); 907 908 return ret; 909 } 910 911 /* Check pf reset is done */ 912 if (req->pf_state) 913 break; 914 usleep_range(1000, 2000); 915 } while (timeout++ < 5); 916 917 ret = hclge_parse_func_status(hdev, req); 918 919 return ret; 920 } 921 922 static int hclge_query_pf_resource(struct hclge_dev *hdev) 923 { 924 struct hclge_pf_res_cmd *req; 925 struct hclge_desc desc; 926 int ret; 927 928 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); 929 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 930 if (ret) { 931 dev_err(&hdev->pdev->dev, 932 "query pf resource failed %d.\n", ret); 933 return ret; 934 } 935 936 req = (struct hclge_pf_res_cmd *)desc.data; 937 hdev->num_tqps = __le16_to_cpu(req->tqp_num); 938 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; 939 940 if (hnae3_dev_roce_supported(hdev)) { 941 hdev->num_roce_msi = 942 
			hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
			hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					     HCLGE_CFG_VMDQ_M,
					     HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					   HCLGE_CFG_TQP_DESC_N_M,
					   HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
				       HCLGE_CFG_PHY_ADDR_M,
				       HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_MEDIA_TP_M,
					 HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_RX_BUF_LEN_M,
					 HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_MAC_ADDR_H_M,
					   HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_DEFAULT_SPEED_M,
					    HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_RSS_SIZE_M,
					   HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_M,
					    HCLGE_CFG_SPEED_ABILITY_S);
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The read length must be given in units of 4 bytes when sent
		 * to hardware.
		 */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Discontiguous TC maps are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
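			/* TC is enabled in hardware: give it a contiguous block
			 * of rss_size queues starting at i * rss_size.
			 */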
1314 kinfo->tc_info[i].enable = true; 1315 kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; 1316 kinfo->tc_info[i].tqp_count = kinfo->rss_size; 1317 kinfo->tc_info[i].tc = i; 1318 } else { 1319 /* Set to default queue if TC is disable */ 1320 kinfo->tc_info[i].enable = false; 1321 kinfo->tc_info[i].tqp_offset = 0; 1322 kinfo->tc_info[i].tqp_count = 1; 1323 kinfo->tc_info[i].tc = 0; 1324 } 1325 } 1326 1327 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 1328 sizeof(struct hnae3_queue *), GFP_KERNEL); 1329 if (!kinfo->tqp) 1330 return -ENOMEM; 1331 1332 ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps); 1333 if (ret) { 1334 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); 1335 return -EINVAL; 1336 } 1337 1338 return 0; 1339 } 1340 1341 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, 1342 struct hclge_vport *vport) 1343 { 1344 struct hnae3_handle *nic = &vport->nic; 1345 struct hnae3_knic_private_info *kinfo; 1346 u16 i; 1347 1348 kinfo = &nic->kinfo; 1349 for (i = 0; i < kinfo->num_tqps; i++) { 1350 struct hclge_tqp *q = 1351 container_of(kinfo->tqp[i], struct hclge_tqp, q); 1352 bool is_pf; 1353 int ret; 1354 1355 is_pf = !(vport->vport_id); 1356 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, 1357 i, is_pf); 1358 if (ret) 1359 return ret; 1360 } 1361 1362 return 0; 1363 } 1364 1365 static int hclge_map_tqp(struct hclge_dev *hdev) 1366 { 1367 struct hclge_vport *vport = hdev->vport; 1368 u16 i, num_vport; 1369 1370 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; 1371 for (i = 0; i < num_vport; i++) { 1372 int ret; 1373 1374 ret = hclge_map_tqp_to_vport(hdev, vport); 1375 if (ret) 1376 return ret; 1377 1378 vport++; 1379 } 1380 1381 return 0; 1382 } 1383 1384 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps) 1385 { 1386 /* this would be initialized later */ 1387 } 1388 1389 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) 1390 { 1391 struct hnae3_handle *nic = &vport->nic; 1392 struct hclge_dev *hdev = vport->back; 1393 int ret; 1394 1395 nic->pdev = hdev->pdev; 1396 nic->ae_algo = &ae_algo; 1397 nic->numa_node_mask = hdev->numa_node_mask; 1398 1399 if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) { 1400 ret = hclge_knic_setup(vport, num_tqps); 1401 if (ret) { 1402 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", 1403 ret); 1404 return ret; 1405 } 1406 } else { 1407 hclge_unic_setup(vport, num_tqps); 1408 } 1409 1410 return 0; 1411 } 1412 1413 static int hclge_alloc_vport(struct hclge_dev *hdev) 1414 { 1415 struct pci_dev *pdev = hdev->pdev; 1416 struct hclge_vport *vport; 1417 u32 tqp_main_vport; 1418 u32 tqp_per_vport; 1419 int num_vport, i; 1420 int ret; 1421 1422 /* We need to alloc a vport for main NIC of PF */ 1423 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; 1424 1425 if (hdev->num_tqps < num_vport) { 1426 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)", 1427 hdev->num_tqps, num_vport); 1428 return -EINVAL; 1429 } 1430 1431 /* Alloc the same number of TQPs for every vport */ 1432 tqp_per_vport = hdev->num_tqps / num_vport; 1433 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; 1434 1435 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), 1436 GFP_KERNEL); 1437 if (!vport) 1438 return -ENOMEM; 1439 1440 hdev->vport = vport; 1441 hdev->num_alloc_vport = num_vport; 1442 1443 if (IS_ENABLED(CONFIG_PCI_IOV)) 1444 hdev->num_alloc_vfs = hdev->num_req_vfs; 1445 1446 for (i = 0; i < num_vport; i++) { 1447 vport->back = hdev; 
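		/* vport 0 is the PF's own (main NIC) vport; VF vports, if any, follow it */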
1448 vport->vport_id = i; 1449 1450 if (i == 0) 1451 ret = hclge_vport_setup(vport, tqp_main_vport); 1452 else 1453 ret = hclge_vport_setup(vport, tqp_per_vport); 1454 if (ret) { 1455 dev_err(&pdev->dev, 1456 "vport setup failed for vport %d, %d\n", 1457 i, ret); 1458 return ret; 1459 } 1460 1461 vport++; 1462 } 1463 1464 return 0; 1465 } 1466 1467 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, 1468 struct hclge_pkt_buf_alloc *buf_alloc) 1469 { 1470 /* TX buffer size is unit by 128 byte */ 1471 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 1472 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) 1473 struct hclge_tx_buff_alloc_cmd *req; 1474 struct hclge_desc desc; 1475 int ret; 1476 u8 i; 1477 1478 req = (struct hclge_tx_buff_alloc_cmd *)desc.data; 1479 1480 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); 1481 for (i = 0; i < HCLGE_TC_NUM; i++) { 1482 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 1483 1484 req->tx_pkt_buff[i] = 1485 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | 1486 HCLGE_BUF_SIZE_UPDATE_EN_MSK); 1487 } 1488 1489 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1490 if (ret) { 1491 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", 1492 ret); 1493 return ret; 1494 } 1495 1496 return 0; 1497 } 1498 1499 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, 1500 struct hclge_pkt_buf_alloc *buf_alloc) 1501 { 1502 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); 1503 1504 if (ret) { 1505 dev_err(&hdev->pdev->dev, 1506 "tx buffer alloc failed %d\n", ret); 1507 return ret; 1508 } 1509 1510 return 0; 1511 } 1512 1513 static int hclge_get_tc_num(struct hclge_dev *hdev) 1514 { 1515 int i, cnt = 0; 1516 1517 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1518 if (hdev->hw_tc_map & BIT(i)) 1519 cnt++; 1520 return cnt; 1521 } 1522 1523 static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev) 1524 { 1525 int i, cnt = 0; 1526 1527 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1528 if (hdev->hw_tc_map & BIT(i) && 1529 hdev->tm_info.hw_pfc_map & BIT(i)) 1530 cnt++; 1531 return cnt; 1532 } 1533 1534 /* Get the number of pfc enabled TCs, which have private buffer */ 1535 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, 1536 struct hclge_pkt_buf_alloc *buf_alloc) 1537 { 1538 struct hclge_priv_buf *priv; 1539 int i, cnt = 0; 1540 1541 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1542 priv = &buf_alloc->priv_buf[i]; 1543 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && 1544 priv->enable) 1545 cnt++; 1546 } 1547 1548 return cnt; 1549 } 1550 1551 /* Get the number of pfc disabled TCs, which have private buffer */ 1552 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, 1553 struct hclge_pkt_buf_alloc *buf_alloc) 1554 { 1555 struct hclge_priv_buf *priv; 1556 int i, cnt = 0; 1557 1558 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1559 priv = &buf_alloc->priv_buf[i]; 1560 if (hdev->hw_tc_map & BIT(i) && 1561 !(hdev->tm_info.hw_pfc_map & BIT(i)) && 1562 priv->enable) 1563 cnt++; 1564 } 1565 1566 return cnt; 1567 } 1568 1569 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1570 { 1571 struct hclge_priv_buf *priv; 1572 u32 rx_priv = 0; 1573 int i; 1574 1575 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1576 priv = &buf_alloc->priv_buf[i]; 1577 if (priv->enable) 1578 rx_priv += priv->buf_size; 1579 } 1580 return rx_priv; 1581 } 1582 1583 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1584 { 1585 u32 i, total_tx_size = 0; 1586 1587 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1588 total_tx_size += 
			buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: failure
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, no rx private
	 * buffer is allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc a private buffer for every enabled TC */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the private buffer size of
	 * the PFC-disabled TCs
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of PFC-disabled TCs
	 * that have a private buffer
	 */
	/* count the PFC-disabled TCs that still have a private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* clear starting from the last TC */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear this PFC-disabled TC's private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of PFC-enabled TCs
	 * that have a private buffer.
1775 */ 1776 pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); 1777 1778 /* let the last to be cleared first */ 1779 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 1780 priv = &buf_alloc->priv_buf[i]; 1781 1782 if (hdev->hw_tc_map & BIT(i) && 1783 hdev->tm_info.hw_pfc_map & BIT(i)) { 1784 /* Reduce the number of pfc TC with private buffer */ 1785 priv->wl.low = 0; 1786 priv->enable = 0; 1787 priv->wl.high = 0; 1788 priv->buf_size = 0; 1789 pfc_priv_num--; 1790 } 1791 1792 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 1793 pfc_priv_num == 0) 1794 break; 1795 } 1796 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1797 return 0; 1798 1799 return -ENOMEM; 1800 } 1801 1802 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, 1803 struct hclge_pkt_buf_alloc *buf_alloc) 1804 { 1805 struct hclge_rx_priv_buff_cmd *req; 1806 struct hclge_desc desc; 1807 int ret; 1808 int i; 1809 1810 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); 1811 req = (struct hclge_rx_priv_buff_cmd *)desc.data; 1812 1813 /* Alloc private buffer TCs */ 1814 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1815 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 1816 1817 req->buf_num[i] = 1818 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); 1819 req->buf_num[i] |= 1820 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); 1821 } 1822 1823 req->shared_buf = 1824 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | 1825 (1 << HCLGE_TC0_PRI_BUF_EN_B)); 1826 1827 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1828 if (ret) { 1829 dev_err(&hdev->pdev->dev, 1830 "rx private buffer alloc cmd failed %d\n", ret); 1831 return ret; 1832 } 1833 1834 return 0; 1835 } 1836 1837 #define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0) 1838 1839 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, 1840 struct hclge_pkt_buf_alloc *buf_alloc) 1841 { 1842 struct hclge_rx_priv_wl_buf *req; 1843 struct hclge_priv_buf *priv; 1844 struct hclge_desc desc[2]; 1845 int i, j; 1846 int ret; 1847 1848 for (i = 0; i < 2; i++) { 1849 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, 1850 false); 1851 req = (struct hclge_rx_priv_wl_buf *)desc[i].data; 1852 1853 /* The first descriptor set the NEXT bit to 1 */ 1854 if (i == 0) 1855 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1856 else 1857 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1858 1859 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 1860 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; 1861 1862 priv = &buf_alloc->priv_buf[idx]; 1863 req->tc_wl[j].high = 1864 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); 1865 req->tc_wl[j].high |= 1866 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) << 1867 HCLGE_RX_PRIV_EN_B); 1868 req->tc_wl[j].low = 1869 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); 1870 req->tc_wl[j].low |= 1871 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) << 1872 HCLGE_RX_PRIV_EN_B); 1873 } 1874 } 1875 1876 /* Send 2 descriptor at one time */ 1877 ret = hclge_cmd_send(&hdev->hw, desc, 2); 1878 if (ret) { 1879 dev_err(&hdev->pdev->dev, 1880 "rx private waterline config cmd failed %d\n", 1881 ret); 1882 return ret; 1883 } 1884 return 0; 1885 } 1886 1887 static int hclge_common_thrd_config(struct hclge_dev *hdev, 1888 struct hclge_pkt_buf_alloc *buf_alloc) 1889 { 1890 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; 1891 struct hclge_rx_com_thrd *req; 1892 struct hclge_desc desc[2]; 1893 struct hclge_tc_thrd *tc; 1894 int i, j; 1895 int ret; 1896 1897 for (i = 0; i < 2; i++) { 1898 hclge_cmd_setup_basic_desc(&desc[i], 1899 HCLGE_OPC_RX_COM_THRD_ALLOC, 
false); 1900 req = (struct hclge_rx_com_thrd *)&desc[i].data; 1901 1902 /* The first descriptor set the NEXT bit to 1 */ 1903 if (i == 0) 1904 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1905 else 1906 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1907 1908 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 1909 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; 1910 1911 req->com_thrd[j].high = 1912 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); 1913 req->com_thrd[j].high |= 1914 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) << 1915 HCLGE_RX_PRIV_EN_B); 1916 req->com_thrd[j].low = 1917 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); 1918 req->com_thrd[j].low |= 1919 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) << 1920 HCLGE_RX_PRIV_EN_B); 1921 } 1922 } 1923 1924 /* Send 2 descriptors at one time */ 1925 ret = hclge_cmd_send(&hdev->hw, desc, 2); 1926 if (ret) { 1927 dev_err(&hdev->pdev->dev, 1928 "common threshold config cmd failed %d\n", ret); 1929 return ret; 1930 } 1931 return 0; 1932 } 1933 1934 static int hclge_common_wl_config(struct hclge_dev *hdev, 1935 struct hclge_pkt_buf_alloc *buf_alloc) 1936 { 1937 struct hclge_shared_buf *buf = &buf_alloc->s_buf; 1938 struct hclge_rx_com_wl *req; 1939 struct hclge_desc desc; 1940 int ret; 1941 1942 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); 1943 1944 req = (struct hclge_rx_com_wl *)desc.data; 1945 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); 1946 req->com_wl.high |= 1947 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) << 1948 HCLGE_RX_PRIV_EN_B); 1949 1950 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); 1951 req->com_wl.low |= 1952 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) << 1953 HCLGE_RX_PRIV_EN_B); 1954 1955 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1956 if (ret) { 1957 dev_err(&hdev->pdev->dev, 1958 "common waterline config cmd failed %d\n", ret); 1959 return ret; 1960 } 1961 1962 return 0; 1963 } 1964 1965 int hclge_buffer_alloc(struct hclge_dev *hdev) 1966 { 1967 struct hclge_pkt_buf_alloc *pkt_buf; 1968 int ret; 1969 1970 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); 1971 if (!pkt_buf) 1972 return -ENOMEM; 1973 1974 ret = hclge_tx_buffer_calc(hdev, pkt_buf); 1975 if (ret) { 1976 dev_err(&hdev->pdev->dev, 1977 "could not calc tx buffer size for all TCs %d\n", ret); 1978 goto out; 1979 } 1980 1981 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); 1982 if (ret) { 1983 dev_err(&hdev->pdev->dev, 1984 "could not alloc tx buffers %d\n", ret); 1985 goto out; 1986 } 1987 1988 ret = hclge_rx_buffer_calc(hdev, pkt_buf); 1989 if (ret) { 1990 dev_err(&hdev->pdev->dev, 1991 "could not calc rx priv buffer size for all TCs %d\n", 1992 ret); 1993 goto out; 1994 } 1995 1996 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); 1997 if (ret) { 1998 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", 1999 ret); 2000 goto out; 2001 } 2002 2003 if (hnae3_dev_dcb_supported(hdev)) { 2004 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); 2005 if (ret) { 2006 dev_err(&hdev->pdev->dev, 2007 "could not configure rx private waterline %d\n", 2008 ret); 2009 goto out; 2010 } 2011 2012 ret = hclge_common_thrd_config(hdev, pkt_buf); 2013 if (ret) { 2014 dev_err(&hdev->pdev->dev, 2015 "could not configure common threshold %d\n", 2016 ret); 2017 goto out; 2018 } 2019 } 2020 2021 ret = hclge_common_wl_config(hdev, pkt_buf); 2022 if (ret) 2023 dev_err(&hdev->pdev->dev, 2024 "could not configure common waterline %d\n", ret); 2025 2026 out: 2027 kfree(pkt_buf); 2028 return ret; 2029 } 2030 2031 static int 
hclge_init_roce_base_info(struct hclge_vport *vport) 2032 { 2033 struct hnae3_handle *roce = &vport->roce; 2034 struct hnae3_handle *nic = &vport->nic; 2035 2036 roce->rinfo.num_vectors = vport->back->num_roce_msi; 2037 2038 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || 2039 vport->back->num_msi_left == 0) 2040 return -EINVAL; 2041 2042 roce->rinfo.base_vector = vport->back->roce_base_vector; 2043 2044 roce->rinfo.netdev = nic->kinfo.netdev; 2045 roce->rinfo.roce_io_base = vport->back->hw.io_base; 2046 2047 roce->pdev = nic->pdev; 2048 roce->ae_algo = nic->ae_algo; 2049 roce->numa_node_mask = nic->numa_node_mask; 2050 2051 return 0; 2052 } 2053 2054 static int hclge_init_msi(struct hclge_dev *hdev) 2055 { 2056 struct pci_dev *pdev = hdev->pdev; 2057 int vectors; 2058 int i; 2059 2060 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 2061 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2062 if (vectors < 0) { 2063 dev_err(&pdev->dev, 2064 "failed(%d) to allocate MSI/MSI-X vectors\n", 2065 vectors); 2066 return vectors; 2067 } 2068 if (vectors < hdev->num_msi) 2069 dev_warn(&hdev->pdev->dev, 2070 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2071 hdev->num_msi, vectors); 2072 2073 hdev->num_msi = vectors; 2074 hdev->num_msi_left = vectors; 2075 hdev->base_msi_vector = pdev->irq; 2076 hdev->roce_base_vector = hdev->base_msi_vector + 2077 HCLGE_ROCE_VECTOR_OFFSET; 2078 2079 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2080 sizeof(u16), GFP_KERNEL); 2081 if (!hdev->vector_status) { 2082 pci_free_irq_vectors(pdev); 2083 return -ENOMEM; 2084 } 2085 2086 for (i = 0; i < hdev->num_msi; i++) 2087 hdev->vector_status[i] = HCLGE_INVALID_VPORT; 2088 2089 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2090 sizeof(int), GFP_KERNEL); 2091 if (!hdev->vector_irq) { 2092 pci_free_irq_vectors(pdev); 2093 return -ENOMEM; 2094 } 2095 2096 return 0; 2097 } 2098 2099 static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) 2100 { 2101 struct hclge_mac *mac = &hdev->hw.mac; 2102 2103 if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M)) 2104 mac->duplex = (u8)duplex; 2105 else 2106 mac->duplex = HCLGE_MAC_FULL; 2107 2108 mac->speed = speed; 2109 } 2110 2111 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) 2112 { 2113 struct hclge_config_mac_speed_dup_cmd *req; 2114 struct hclge_desc desc; 2115 int ret; 2116 2117 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; 2118 2119 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); 2120 2121 hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); 2122 2123 switch (speed) { 2124 case HCLGE_MAC_SPEED_10M: 2125 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2126 HCLGE_CFG_SPEED_S, 6); 2127 break; 2128 case HCLGE_MAC_SPEED_100M: 2129 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2130 HCLGE_CFG_SPEED_S, 7); 2131 break; 2132 case HCLGE_MAC_SPEED_1G: 2133 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2134 HCLGE_CFG_SPEED_S, 0); 2135 break; 2136 case HCLGE_MAC_SPEED_10G: 2137 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2138 HCLGE_CFG_SPEED_S, 1); 2139 break; 2140 case HCLGE_MAC_SPEED_25G: 2141 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2142 HCLGE_CFG_SPEED_S, 2); 2143 break; 2144 case HCLGE_MAC_SPEED_40G: 2145 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2146 HCLGE_CFG_SPEED_S, 3); 2147 break; 2148 case HCLGE_MAC_SPEED_50G: 2149 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2150 HCLGE_CFG_SPEED_S, 4); 2151 
break; 2152 case HCLGE_MAC_SPEED_100G: 2153 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2154 HCLGE_CFG_SPEED_S, 5); 2155 break; 2156 default: 2157 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); 2158 return -EINVAL; 2159 } 2160 2161 hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, 2162 1); 2163 2164 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2165 if (ret) { 2166 dev_err(&hdev->pdev->dev, 2167 "mac speed/duplex config cmd failed %d.\n", ret); 2168 return ret; 2169 } 2170 2171 hclge_check_speed_dup(hdev, duplex, speed); 2172 2173 return 0; 2174 } 2175 2176 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, 2177 u8 duplex) 2178 { 2179 struct hclge_vport *vport = hclge_get_vport(handle); 2180 struct hclge_dev *hdev = vport->back; 2181 2182 return hclge_cfg_mac_speed_dup(hdev, speed, duplex); 2183 } 2184 2185 static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, 2186 u8 *duplex) 2187 { 2188 struct hclge_query_an_speed_dup_cmd *req; 2189 struct hclge_desc desc; 2190 int speed_tmp; 2191 int ret; 2192 2193 req = (struct hclge_query_an_speed_dup_cmd *)desc.data; 2194 2195 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); 2196 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2197 if (ret) { 2198 dev_err(&hdev->pdev->dev, 2199 "mac speed/autoneg/duplex query cmd failed %d\n", 2200 ret); 2201 return ret; 2202 } 2203 2204 *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); 2205 speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, 2206 HCLGE_QUERY_SPEED_S); 2207 2208 ret = hclge_parse_speed(speed_tmp, speed); 2209 if (ret) { 2210 dev_err(&hdev->pdev->dev, 2211 "could not parse speed(=%d), %d\n", speed_tmp, ret); 2212 return -EIO; 2213 } 2214 2215 return 0; 2216 } 2217 2218 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) 2219 { 2220 struct hclge_config_auto_neg_cmd *req; 2221 struct hclge_desc desc; 2222 u32 flag = 0; 2223 int ret; 2224 2225 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); 2226 2227 req = (struct hclge_config_auto_neg_cmd *)desc.data; 2228 hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); 2229 req->cfg_an_cmd_flag = cpu_to_le32(flag); 2230 2231 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2232 if (ret) { 2233 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", 2234 ret); 2235 return ret; 2236 } 2237 2238 return 0; 2239 } 2240 2241 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) 2242 { 2243 struct hclge_vport *vport = hclge_get_vport(handle); 2244 struct hclge_dev *hdev = vport->back; 2245 2246 return hclge_set_autoneg_en(hdev, enable); 2247 } 2248 2249 static int hclge_get_autoneg(struct hnae3_handle *handle) 2250 { 2251 struct hclge_vport *vport = hclge_get_vport(handle); 2252 struct hclge_dev *hdev = vport->back; 2253 struct phy_device *phydev = hdev->hw.mac.phydev; 2254 2255 if (phydev) 2256 return phydev->autoneg; 2257 2258 return hdev->hw.mac.autoneg; 2259 } 2260 2261 static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, 2262 bool mask_vlan, 2263 u8 *mac_mask) 2264 { 2265 struct hclge_mac_vlan_mask_entry_cmd *req; 2266 struct hclge_desc desc; 2267 int status; 2268 2269 req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; 2270 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); 2271 2272 hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, 2273 mask_vlan ? 
1 : 0); 2274 ether_addr_copy(req->mac_mask, mac_mask); 2275 2276 status = hclge_cmd_send(&hdev->hw, &desc, 1); 2277 if (status) 2278 dev_err(&hdev->pdev->dev, 2279 "Config mac_vlan_mask failed for cmd_send, ret =%d\n", 2280 status); 2281 2282 return status; 2283 } 2284 2285 static int hclge_mac_init(struct hclge_dev *hdev) 2286 { 2287 struct hnae3_handle *handle = &hdev->vport[0].nic; 2288 struct net_device *netdev = handle->kinfo.netdev; 2289 struct hclge_mac *mac = &hdev->hw.mac; 2290 u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 2291 int mtu; 2292 int ret; 2293 2294 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); 2295 if (ret) { 2296 dev_err(&hdev->pdev->dev, 2297 "Config mac speed dup fail ret=%d\n", ret); 2298 return ret; 2299 } 2300 2301 mac->link = 0; 2302 2303 /* Initialize the MTA table work mode */ 2304 hdev->accept_mta_mc = true; 2305 hdev->enable_mta = true; 2306 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; 2307 2308 ret = hclge_set_mta_filter_mode(hdev, 2309 hdev->mta_mac_sel_type, 2310 hdev->enable_mta); 2311 if (ret) { 2312 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n", 2313 ret); 2314 return ret; 2315 } 2316 2317 ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc); 2318 if (ret) { 2319 dev_err(&hdev->pdev->dev, 2320 "set mta filter mode fail ret=%d\n", ret); 2321 return ret; 2322 } 2323 2324 ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); 2325 if (ret) { 2326 dev_err(&hdev->pdev->dev, 2327 "set default mac_vlan_mask fail ret=%d\n", ret); 2328 return ret; 2329 } 2330 2331 if (netdev) 2332 mtu = netdev->mtu; 2333 else 2334 mtu = ETH_DATA_LEN; 2335 2336 ret = hclge_set_mtu(handle, mtu); 2337 if (ret) { 2338 dev_err(&hdev->pdev->dev, 2339 "set mtu failed ret=%d\n", ret); 2340 return ret; 2341 } 2342 2343 return 0; 2344 } 2345 2346 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) 2347 { 2348 if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2349 schedule_work(&hdev->mbx_service_task); 2350 } 2351 2352 static void hclge_reset_task_schedule(struct hclge_dev *hdev) 2353 { 2354 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) 2355 schedule_work(&hdev->rst_service_task); 2356 } 2357 2358 static void hclge_task_schedule(struct hclge_dev *hdev) 2359 { 2360 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && 2361 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2362 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) 2363 (void)schedule_work(&hdev->service_task); 2364 } 2365 2366 static int hclge_get_mac_link_status(struct hclge_dev *hdev) 2367 { 2368 struct hclge_link_status_cmd *req; 2369 struct hclge_desc desc; 2370 int link_status; 2371 int ret; 2372 2373 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); 2374 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2375 if (ret) { 2376 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", 2377 ret); 2378 return ret; 2379 } 2380 2381 req = (struct hclge_link_status_cmd *)desc.data; 2382 link_status = req->status & HCLGE_LINK_STATUS; 2383 2384 return !!link_status; 2385 } 2386 2387 static int hclge_get_mac_phy_link(struct hclge_dev *hdev) 2388 { 2389 int mac_state; 2390 int link_stat; 2391 2392 mac_state = hclge_get_mac_link_status(hdev); 2393 2394 if (hdev->hw.mac.phydev) { 2395 if (!genphy_read_status(hdev->hw.mac.phydev)) 2396 link_stat = mac_state & 2397 hdev->hw.mac.phydev->link; 2398 else 2399 link_stat = 0; 2400 2401 } else { 2402 link_stat = mac_state; 2403 } 2404 2405 return 
!!link_stat; 2406 } 2407 2408 static void hclge_update_link_status(struct hclge_dev *hdev) 2409 { 2410 struct hnae3_client *client = hdev->nic_client; 2411 struct hnae3_handle *handle; 2412 int state; 2413 int i; 2414 2415 if (!client) 2416 return; 2417 state = hclge_get_mac_phy_link(hdev); 2418 if (state != hdev->hw.mac.link) { 2419 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2420 handle = &hdev->vport[i].nic; 2421 client->ops->link_status_change(handle, state); 2422 } 2423 hdev->hw.mac.link = state; 2424 } 2425 } 2426 2427 static int hclge_update_speed_duplex(struct hclge_dev *hdev) 2428 { 2429 struct hclge_mac mac = hdev->hw.mac; 2430 u8 duplex; 2431 int speed; 2432 int ret; 2433 2434 /* get the speed and duplex as the autoneg result from the mac cmd when phy 2435 * doesn't exist. 2436 */ 2437 if (mac.phydev || !mac.autoneg) 2438 return 0; 2439 2440 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); 2441 if (ret) { 2442 dev_err(&hdev->pdev->dev, 2443 "mac autoneg/speed/duplex query failed %d\n", ret); 2444 return ret; 2445 } 2446 2447 if ((mac.speed != speed) || (mac.duplex != duplex)) { 2448 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); 2449 if (ret) { 2450 dev_err(&hdev->pdev->dev, 2451 "mac speed/duplex config failed %d\n", ret); 2452 return ret; 2453 } 2454 } 2455 2456 return 0; 2457 } 2458 2459 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) 2460 { 2461 struct hclge_vport *vport = hclge_get_vport(handle); 2462 struct hclge_dev *hdev = vport->back; 2463 2464 return hclge_update_speed_duplex(hdev); 2465 } 2466 2467 static int hclge_get_status(struct hnae3_handle *handle) 2468 { 2469 struct hclge_vport *vport = hclge_get_vport(handle); 2470 struct hclge_dev *hdev = vport->back; 2471 2472 hclge_update_link_status(hdev); 2473 2474 return hdev->hw.mac.link; 2475 } 2476 2477 static void hclge_service_timer(struct timer_list *t) 2478 { 2479 struct hclge_dev *hdev = from_timer(hdev, t, service_timer); 2480 2481 mod_timer(&hdev->service_timer, jiffies + HZ); 2482 hdev->hw_stats.stats_timer++; 2483 hclge_task_schedule(hdev); 2484 } 2485 2486 static void hclge_service_complete(struct hclge_dev *hdev) 2487 { 2488 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); 2489 2490 /* Flush memory before next watchdog */ 2491 smp_mb__before_atomic(); 2492 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); 2493 } 2494 2495 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) 2496 { 2497 u32 rst_src_reg; 2498 u32 cmdq_src_reg; 2499 2500 /* fetch the events from their corresponding regs */ 2501 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG); 2502 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); 2503 2504 /* Assumption: If by any chance reset and mailbox events are reported 2505 * together then we will only process the reset event in this go and will 2506 * defer the processing of the mailbox events. Since we would not have 2507 * cleared the RX CMDQ event this time, we would receive another 2508 * interrupt from H/W just for the mailbox.
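* For example, if a global reset and a mailbox message are signalled in the
* same interrupt, only the reset bit is handed back through *clearval; the
* uncleared CMDQ RX source bit raises the interrupt again once vector0 is
* re-enabled, and the mailbox is then picked up on that later pass.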
2509 */ 2510 2511 /* check for vector0 reset event sources */ 2512 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { 2513 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); 2514 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 2515 return HCLGE_VECTOR0_EVENT_RST; 2516 } 2517 2518 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { 2519 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); 2520 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); 2521 return HCLGE_VECTOR0_EVENT_RST; 2522 } 2523 2524 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { 2525 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); 2526 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 2527 return HCLGE_VECTOR0_EVENT_RST; 2528 } 2529 2530 /* check for vector0 mailbox(=CMDQ RX) event source */ 2531 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 2532 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); 2533 *clearval = cmdq_src_reg; 2534 return HCLGE_VECTOR0_EVENT_MBX; 2535 } 2536 2537 return HCLGE_VECTOR0_EVENT_OTHER; 2538 } 2539 2540 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, 2541 u32 regclr) 2542 { 2543 switch (event_type) { 2544 case HCLGE_VECTOR0_EVENT_RST: 2545 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); 2546 break; 2547 case HCLGE_VECTOR0_EVENT_MBX: 2548 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); 2549 break; 2550 } 2551 } 2552 2553 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) 2554 { 2555 writel(enable ? 1 : 0, vector->addr); 2556 } 2557 2558 static irqreturn_t hclge_misc_irq_handle(int irq, void *data) 2559 { 2560 struct hclge_dev *hdev = data; 2561 u32 event_cause; 2562 u32 clearval; 2563 2564 hclge_enable_vector(&hdev->misc_vector, false); 2565 event_cause = hclge_check_event_cause(hdev, &clearval); 2566 2567 /* vector 0 interrupt is shared with reset and mailbox source events.*/ 2568 switch (event_cause) { 2569 case HCLGE_VECTOR0_EVENT_RST: 2570 hclge_reset_task_schedule(hdev); 2571 break; 2572 case HCLGE_VECTOR0_EVENT_MBX: 2573 /* If we are here then, 2574 * 1. Either we are not handling any mbx task and we are not 2575 * scheduled as well 2576 * OR 2577 * 2. We could be handling a mbx task but nothing more is 2578 * scheduled. 2579 * In both cases, we should schedule mbx task as there are more 2580 * mbx messages reported by this interrupt. 
2581 */ 2582 hclge_mbx_task_schedule(hdev); 2583 break; 2584 default: 2585 dev_dbg(&hdev->pdev->dev, 2586 "received unknown or unhandled event of vector0\n"); 2587 break; 2588 } 2589 2590 /* we should clear the source of interrupt */ 2591 hclge_clear_event_cause(hdev, event_cause, clearval); 2592 hclge_enable_vector(&hdev->misc_vector, true); 2593 2594 return IRQ_HANDLED; 2595 } 2596 2597 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) 2598 { 2599 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; 2600 hdev->num_msi_left += 1; 2601 hdev->num_msi_used -= 1; 2602 } 2603 2604 static void hclge_get_misc_vector(struct hclge_dev *hdev) 2605 { 2606 struct hclge_misc_vector *vector = &hdev->misc_vector; 2607 2608 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); 2609 2610 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; 2611 hdev->vector_status[0] = 0; 2612 2613 hdev->num_msi_left -= 1; 2614 hdev->num_msi_used += 1; 2615 } 2616 2617 static int hclge_misc_irq_init(struct hclge_dev *hdev) 2618 { 2619 int ret; 2620 2621 hclge_get_misc_vector(hdev); 2622 2623 /* this would be explicitly freed in the end */ 2624 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, 2625 0, "hclge_misc", hdev); 2626 if (ret) { 2627 hclge_free_vector(hdev, 0); 2628 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", 2629 hdev->misc_vector.vector_irq); 2630 } 2631 2632 return ret; 2633 } 2634 2635 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) 2636 { 2637 free_irq(hdev->misc_vector.vector_irq, hdev); 2638 hclge_free_vector(hdev, 0); 2639 } 2640 2641 static int hclge_notify_client(struct hclge_dev *hdev, 2642 enum hnae3_reset_notify_type type) 2643 { 2644 struct hnae3_client *client = hdev->nic_client; 2645 u16 i; 2646 2647 if (!client->ops->reset_notify) 2648 return -EOPNOTSUPP; 2649 2650 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2651 struct hnae3_handle *handle = &hdev->vport[i].nic; 2652 int ret; 2653 2654 ret = client->ops->reset_notify(handle, type); 2655 if (ret) 2656 return ret; 2657 } 2658 2659 return 0; 2660 } 2661 2662 static int hclge_reset_wait(struct hclge_dev *hdev) 2663 { 2664 #define HCLGE_RESET_WATI_MS 100 2665 #define HCLGE_RESET_WAIT_CNT 5 2666 u32 val, reg, reg_bit; 2667 u32 cnt = 0; 2668 2669 switch (hdev->reset_type) { 2670 case HNAE3_GLOBAL_RESET: 2671 reg = HCLGE_GLOBAL_RESET_REG; 2672 reg_bit = HCLGE_GLOBAL_RESET_BIT; 2673 break; 2674 case HNAE3_CORE_RESET: 2675 reg = HCLGE_GLOBAL_RESET_REG; 2676 reg_bit = HCLGE_CORE_RESET_BIT; 2677 break; 2678 case HNAE3_FUNC_RESET: 2679 reg = HCLGE_FUN_RST_ING; 2680 reg_bit = HCLGE_FUN_RST_ING_B; 2681 break; 2682 default: 2683 dev_err(&hdev->pdev->dev, 2684 "Wait for unsupported reset type: %d\n", 2685 hdev->reset_type); 2686 return -EINVAL; 2687 } 2688 2689 val = hclge_read_dev(&hdev->hw, reg); 2690 while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { 2691 msleep(HCLGE_RESET_WATI_MS); 2692 val = hclge_read_dev(&hdev->hw, reg); 2693 cnt++; 2694 } 2695 2696 if (cnt >= HCLGE_RESET_WAIT_CNT) { 2697 dev_warn(&hdev->pdev->dev, 2698 "Wait for reset timeout: %d\n", hdev->reset_type); 2699 return -EBUSY; 2700 } 2701 2702 return 0; 2703 } 2704 2705 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) 2706 { 2707 struct hclge_desc desc; 2708 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; 2709 int ret; 2710 2711 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); 2712 hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0); 2713
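/* Request a function-level reset only: leave the MAC reset bit clear and
 * set the per-function reset bit for the vfid filled in below.
 */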
hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); 2714 req->fun_reset_vfid = func_id; 2715 2716 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2717 if (ret) 2718 dev_err(&hdev->pdev->dev, 2719 "send function reset cmd fail, status =%d\n", ret); 2720 2721 return ret; 2722 } 2723 2724 static void hclge_do_reset(struct hclge_dev *hdev) 2725 { 2726 struct pci_dev *pdev = hdev->pdev; 2727 u32 val; 2728 2729 switch (hdev->reset_type) { 2730 case HNAE3_GLOBAL_RESET: 2731 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 2732 hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); 2733 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 2734 dev_info(&pdev->dev, "Global Reset requested\n"); 2735 break; 2736 case HNAE3_CORE_RESET: 2737 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 2738 hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1); 2739 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 2740 dev_info(&pdev->dev, "Core Reset requested\n"); 2741 break; 2742 case HNAE3_FUNC_RESET: 2743 dev_info(&pdev->dev, "PF Reset requested\n"); 2744 hclge_func_reset_cmd(hdev, 0); 2745 /* schedule again to check later */ 2746 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); 2747 hclge_reset_task_schedule(hdev); 2748 break; 2749 default: 2750 dev_warn(&pdev->dev, 2751 "Unsupported reset type: %d\n", hdev->reset_type); 2752 break; 2753 } 2754 } 2755 2756 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, 2757 unsigned long *addr) 2758 { 2759 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 2760 2761 /* return the highest priority reset level amongst all */ 2762 if (test_bit(HNAE3_GLOBAL_RESET, addr)) 2763 rst_level = HNAE3_GLOBAL_RESET; 2764 else if (test_bit(HNAE3_CORE_RESET, addr)) 2765 rst_level = HNAE3_CORE_RESET; 2766 else if (test_bit(HNAE3_IMP_RESET, addr)) 2767 rst_level = HNAE3_IMP_RESET; 2768 else if (test_bit(HNAE3_FUNC_RESET, addr)) 2769 rst_level = HNAE3_FUNC_RESET; 2770 2771 /* now, clear all other resets */ 2772 clear_bit(HNAE3_GLOBAL_RESET, addr); 2773 clear_bit(HNAE3_CORE_RESET, addr); 2774 clear_bit(HNAE3_IMP_RESET, addr); 2775 clear_bit(HNAE3_FUNC_RESET, addr); 2776 2777 return rst_level; 2778 } 2779 2780 static void hclge_reset(struct hclge_dev *hdev) 2781 { 2782 /* perform reset of the stack & ae device for a client */ 2783 2784 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 2785 2786 if (!hclge_reset_wait(hdev)) { 2787 rtnl_lock(); 2788 hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); 2789 hclge_reset_ae_dev(hdev->ae_dev); 2790 hclge_notify_client(hdev, HNAE3_INIT_CLIENT); 2791 rtnl_unlock(); 2792 } else { 2793 /* schedule again to check pending resets later */ 2794 set_bit(hdev->reset_type, &hdev->reset_pending); 2795 hclge_reset_task_schedule(hdev); 2796 } 2797 2798 hclge_notify_client(hdev, HNAE3_UP_CLIENT); 2799 } 2800 2801 static void hclge_reset_event(struct hnae3_handle *handle) 2802 { 2803 struct hclge_vport *vport = hclge_get_vport(handle); 2804 struct hclge_dev *hdev = vport->back; 2805 2806 /* check if this is a new reset request and we are not here just because 2807 * last reset attempt did not succeed and watchdog hit us again. We will 2808 * know this if last reset request did not occur very recently (watchdog 2809 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) 2810 * In case of new request we reset the "reset level" to PF reset. 
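* (4 * 5 * HZ jiffies is roughly 20 seconds; a request arriving later than
* that after the previous one is treated as an independent, new request.)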
2811 */ 2812 if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) 2813 handle->reset_level = HNAE3_FUNC_RESET; 2814 2815 dev_info(&hdev->pdev->dev, "received reset event , reset type is %d", 2816 handle->reset_level); 2817 2818 /* request reset & schedule reset task */ 2819 set_bit(handle->reset_level, &hdev->reset_request); 2820 hclge_reset_task_schedule(hdev); 2821 2822 if (handle->reset_level < HNAE3_GLOBAL_RESET) 2823 handle->reset_level++; 2824 2825 handle->last_reset_time = jiffies; 2826 } 2827 2828 static void hclge_reset_subtask(struct hclge_dev *hdev) 2829 { 2830 /* check if there is any ongoing reset in the hardware. This status can 2831 * be checked from reset_pending. If there is then, we need to wait for 2832 * hardware to complete reset. 2833 * a. If we are able to figure out in reasonable time that hardware 2834 * has fully resetted then, we can proceed with driver, client 2835 * reset. 2836 * b. else, we can come back later to check this status so re-sched 2837 * now. 2838 */ 2839 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); 2840 if (hdev->reset_type != HNAE3_NONE_RESET) 2841 hclge_reset(hdev); 2842 2843 /* check if we got any *new* reset requests to be honored */ 2844 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request); 2845 if (hdev->reset_type != HNAE3_NONE_RESET) 2846 hclge_do_reset(hdev); 2847 2848 hdev->reset_type = HNAE3_NONE_RESET; 2849 } 2850 2851 static void hclge_reset_service_task(struct work_struct *work) 2852 { 2853 struct hclge_dev *hdev = 2854 container_of(work, struct hclge_dev, rst_service_task); 2855 2856 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 2857 return; 2858 2859 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 2860 2861 hclge_reset_subtask(hdev); 2862 2863 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 2864 } 2865 2866 static void hclge_mailbox_service_task(struct work_struct *work) 2867 { 2868 struct hclge_dev *hdev = 2869 container_of(work, struct hclge_dev, mbx_service_task); 2870 2871 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) 2872 return; 2873 2874 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 2875 2876 hclge_mbx_handler(hdev); 2877 2878 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 2879 } 2880 2881 static void hclge_service_task(struct work_struct *work) 2882 { 2883 struct hclge_dev *hdev = 2884 container_of(work, struct hclge_dev, service_task); 2885 2886 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { 2887 hclge_update_stats_for_all(hdev); 2888 hdev->hw_stats.stats_timer = 0; 2889 } 2890 2891 hclge_update_speed_duplex(hdev); 2892 hclge_update_link_status(hdev); 2893 hclge_service_complete(hdev); 2894 } 2895 2896 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) 2897 { 2898 /* VF handle has no client */ 2899 if (!handle->client) 2900 return container_of(handle, struct hclge_vport, nic); 2901 else if (handle->client->type == HNAE3_CLIENT_ROCE) 2902 return container_of(handle, struct hclge_vport, roce); 2903 else 2904 return container_of(handle, struct hclge_vport, nic); 2905 } 2906 2907 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, 2908 struct hnae3_vector_info *vector_info) 2909 { 2910 struct hclge_vport *vport = hclge_get_vport(handle); 2911 struct hnae3_vector_info *vector = vector_info; 2912 struct hclge_dev *hdev = vport->back; 2913 int alloc = 0; 2914 int i, j; 2915 2916 vector_num = min(hdev->num_msi_left, vector_num); 2917 2918 for (j = 0; j < vector_num; j++) 
{ 2919 for (i = 1; i < hdev->num_msi; i++) { 2920 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { 2921 vector->vector = pci_irq_vector(hdev->pdev, i); 2922 vector->io_addr = hdev->hw.io_base + 2923 HCLGE_VECTOR_REG_BASE + 2924 (i - 1) * HCLGE_VECTOR_REG_OFFSET + 2925 vport->vport_id * 2926 HCLGE_VECTOR_VF_OFFSET; 2927 hdev->vector_status[i] = vport->vport_id; 2928 hdev->vector_irq[i] = vector->vector; 2929 2930 vector++; 2931 alloc++; 2932 2933 break; 2934 } 2935 } 2936 } 2937 hdev->num_msi_left -= alloc; 2938 hdev->num_msi_used += alloc; 2939 2940 return alloc; 2941 } 2942 2943 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) 2944 { 2945 int i; 2946 2947 for (i = 0; i < hdev->num_msi; i++) 2948 if (vector == hdev->vector_irq[i]) 2949 return i; 2950 2951 return -EINVAL; 2952 } 2953 2954 static int hclge_put_vector(struct hnae3_handle *handle, int vector) 2955 { 2956 struct hclge_vport *vport = hclge_get_vport(handle); 2957 struct hclge_dev *hdev = vport->back; 2958 int vector_id; 2959 2960 vector_id = hclge_get_vector_index(hdev, vector); 2961 if (vector_id < 0) { 2962 dev_err(&hdev->pdev->dev, 2963 "Get vector index fail. vector_id =%d\n", vector_id); 2964 return vector_id; 2965 } 2966 2967 hclge_free_vector(hdev, vector_id); 2968 2969 return 0; 2970 } 2971 2972 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) 2973 { 2974 return HCLGE_RSS_KEY_SIZE; 2975 } 2976 2977 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) 2978 { 2979 return HCLGE_RSS_IND_TBL_SIZE; 2980 } 2981 2982 static int hclge_set_rss_algo_key(struct hclge_dev *hdev, 2983 const u8 hfunc, const u8 *key) 2984 { 2985 struct hclge_rss_config_cmd *req; 2986 struct hclge_desc desc; 2987 int key_offset; 2988 int key_size; 2989 int ret; 2990 2991 req = (struct hclge_rss_config_cmd *)desc.data; 2992 2993 for (key_offset = 0; key_offset < 3; key_offset++) { 2994 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, 2995 false); 2996 2997 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); 2998 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); 2999 3000 if (key_offset == 2) 3001 key_size = 3002 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; 3003 else 3004 key_size = HCLGE_RSS_HASH_KEY_NUM; 3005 3006 memcpy(req->hash_key, 3007 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); 3008 3009 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3010 if (ret) { 3011 dev_err(&hdev->pdev->dev, 3012 "Configure RSS config fail, status = %d\n", 3013 ret); 3014 return ret; 3015 } 3016 } 3017 return 0; 3018 } 3019 3020 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) 3021 { 3022 struct hclge_rss_indirection_table_cmd *req; 3023 struct hclge_desc desc; 3024 int i, j; 3025 int ret; 3026 3027 req = (struct hclge_rss_indirection_table_cmd *)desc.data; 3028 3029 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { 3030 hclge_cmd_setup_basic_desc 3031 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); 3032 3033 req->start_table_index = 3034 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); 3035 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); 3036 3037 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) 3038 req->rss_result[j] = 3039 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; 3040 3041 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3042 if (ret) { 3043 dev_err(&hdev->pdev->dev, 3044 "Configure rss indir table fail,status = %d\n", 3045 ret); 3046 return ret; 3047 } 3048 } 3049 return 0; 3050 } 3051 3052 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, 3053 
u16 *tc_size, u16 *tc_offset) 3054 { 3055 struct hclge_rss_tc_mode_cmd *req; 3056 struct hclge_desc desc; 3057 int ret; 3058 int i; 3059 3060 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); 3061 req = (struct hclge_rss_tc_mode_cmd *)desc.data; 3062 3063 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 3064 u16 mode = 0; 3065 3066 hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); 3067 hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M, 3068 HCLGE_RSS_TC_SIZE_S, tc_size[i]); 3069 hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M, 3070 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); 3071 3072 req->rss_tc_mode[i] = cpu_to_le16(mode); 3073 } 3074 3075 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3076 if (ret) { 3077 dev_err(&hdev->pdev->dev, 3078 "Configure rss tc mode fail, status = %d\n", ret); 3079 return ret; 3080 } 3081 3082 return 0; 3083 } 3084 3085 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) 3086 { 3087 struct hclge_rss_input_tuple_cmd *req; 3088 struct hclge_desc desc; 3089 int ret; 3090 3091 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 3092 3093 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 3094 3095 /* Get the tuple cfg from pf */ 3096 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; 3097 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; 3098 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; 3099 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; 3100 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; 3101 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; 3102 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; 3103 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; 3104 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3105 if (ret) { 3106 dev_err(&hdev->pdev->dev, 3107 "Configure rss input fail, status = %d\n", ret); 3108 return ret; 3109 } 3110 3111 return 0; 3112 } 3113 3114 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, 3115 u8 *key, u8 *hfunc) 3116 { 3117 struct hclge_vport *vport = hclge_get_vport(handle); 3118 int i; 3119 3120 /* Get hash algorithm */ 3121 if (hfunc) 3122 *hfunc = vport->rss_algo; 3123 3124 /* Get the RSS Key required by the user */ 3125 if (key) 3126 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); 3127 3128 /* Get indirect table */ 3129 if (indir) 3130 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3131 indir[i] = vport->rss_indirection_tbl[i]; 3132 3133 return 0; 3134 } 3135 3136 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, 3137 const u8 *key, const u8 hfunc) 3138 { 3139 struct hclge_vport *vport = hclge_get_vport(handle); 3140 struct hclge_dev *hdev = vport->back; 3141 u8 hash_algo; 3142 int ret, i; 3143 3144 /* Set the RSS Hash Key if specififed by the user */ 3145 if (key) { 3146 3147 if (hfunc == ETH_RSS_HASH_TOP || 3148 hfunc == ETH_RSS_HASH_NO_CHANGE) 3149 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 3150 else 3151 return -EINVAL; 3152 ret = hclge_set_rss_algo_key(hdev, hash_algo, key); 3153 if (ret) 3154 return ret; 3155 3156 /* Update the shadow RSS key with user specified qids */ 3157 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); 3158 vport->rss_algo = hash_algo; 3159 } 3160 3161 /* Update the shadow RSS table with user specified qids */ 3162 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3163 vport->rss_indirection_tbl[i] = indir[i]; 3164 3165 /* Update the hardware */ 3166 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); 3167 } 
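/* hclge_get_rss_hash_bits() below translates the ethtool RXH_* flags carried
 * in nfc->data into the tuple bits used by the hardware. For example,
 * "ethtool -N <dev> rx-flow-hash tcp4 sdfn" requests
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, which maps to
 * HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT;
 * SCTP flow types additionally get HCLGE_V_TAG_BIT.
 */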
3168 3169 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 3170 { 3171 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0; 3172 3173 if (nfc->data & RXH_L4_B_2_3) 3174 hash_sets |= HCLGE_D_PORT_BIT; 3175 else 3176 hash_sets &= ~HCLGE_D_PORT_BIT; 3177 3178 if (nfc->data & RXH_IP_SRC) 3179 hash_sets |= HCLGE_S_IP_BIT; 3180 else 3181 hash_sets &= ~HCLGE_S_IP_BIT; 3182 3183 if (nfc->data & RXH_IP_DST) 3184 hash_sets |= HCLGE_D_IP_BIT; 3185 else 3186 hash_sets &= ~HCLGE_D_IP_BIT; 3187 3188 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 3189 hash_sets |= HCLGE_V_TAG_BIT; 3190 3191 return hash_sets; 3192 } 3193 3194 static int hclge_set_rss_tuple(struct hnae3_handle *handle, 3195 struct ethtool_rxnfc *nfc) 3196 { 3197 struct hclge_vport *vport = hclge_get_vport(handle); 3198 struct hclge_dev *hdev = vport->back; 3199 struct hclge_rss_input_tuple_cmd *req; 3200 struct hclge_desc desc; 3201 u8 tuple_sets; 3202 int ret; 3203 3204 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | 3205 RXH_L4_B_0_1 | RXH_L4_B_2_3)) 3206 return -EINVAL; 3207 3208 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 3209 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 3210 3211 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; 3212 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; 3213 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; 3214 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; 3215 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; 3216 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; 3217 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; 3218 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; 3219 3220 tuple_sets = hclge_get_rss_hash_bits(nfc); 3221 switch (nfc->flow_type) { 3222 case TCP_V4_FLOW: 3223 req->ipv4_tcp_en = tuple_sets; 3224 break; 3225 case TCP_V6_FLOW: 3226 req->ipv6_tcp_en = tuple_sets; 3227 break; 3228 case UDP_V4_FLOW: 3229 req->ipv4_udp_en = tuple_sets; 3230 break; 3231 case UDP_V6_FLOW: 3232 req->ipv6_udp_en = tuple_sets; 3233 break; 3234 case SCTP_V4_FLOW: 3235 req->ipv4_sctp_en = tuple_sets; 3236 break; 3237 case SCTP_V6_FLOW: 3238 if ((nfc->data & RXH_L4_B_0_1) || 3239 (nfc->data & RXH_L4_B_2_3)) 3240 return -EINVAL; 3241 3242 req->ipv6_sctp_en = tuple_sets; 3243 break; 3244 case IPV4_FLOW: 3245 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3246 break; 3247 case IPV6_FLOW: 3248 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3249 break; 3250 default: 3251 return -EINVAL; 3252 } 3253 3254 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3255 if (ret) { 3256 dev_err(&hdev->pdev->dev, 3257 "Set rss tuple fail, status = %d\n", ret); 3258 return ret; 3259 } 3260 3261 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 3262 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 3263 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 3264 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 3265 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 3266 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 3267 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 3268 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 3269 return 0; 3270 } 3271 3272 static int hclge_get_rss_tuple(struct hnae3_handle *handle, 3273 struct ethtool_rxnfc *nfc) 3274 { 3275 struct hclge_vport *vport = hclge_get_vport(handle); 3276 u8 tuple_sets; 3277 3278 nfc->data = 0; 3279 3280 switch (nfc->flow_type) { 3281 case TCP_V4_FLOW: 3282 tuple_sets = 
vport->rss_tuple_sets.ipv4_tcp_en; 3283 break; 3284 case UDP_V4_FLOW: 3285 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; 3286 break; 3287 case TCP_V6_FLOW: 3288 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; 3289 break; 3290 case UDP_V6_FLOW: 3291 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; 3292 break; 3293 case SCTP_V4_FLOW: 3294 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; 3295 break; 3296 case SCTP_V6_FLOW: 3297 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; 3298 break; 3299 case IPV4_FLOW: 3300 case IPV6_FLOW: 3301 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; 3302 break; 3303 default: 3304 return -EINVAL; 3305 } 3306 3307 if (!tuple_sets) 3308 return 0; 3309 3310 if (tuple_sets & HCLGE_D_PORT_BIT) 3311 nfc->data |= RXH_L4_B_2_3; 3312 if (tuple_sets & HCLGE_S_PORT_BIT) 3313 nfc->data |= RXH_L4_B_0_1; 3314 if (tuple_sets & HCLGE_D_IP_BIT) 3315 nfc->data |= RXH_IP_DST; 3316 if (tuple_sets & HCLGE_S_IP_BIT) 3317 nfc->data |= RXH_IP_SRC; 3318 3319 return 0; 3320 } 3321 3322 static int hclge_get_tc_size(struct hnae3_handle *handle) 3323 { 3324 struct hclge_vport *vport = hclge_get_vport(handle); 3325 struct hclge_dev *hdev = vport->back; 3326 3327 return hdev->rss_size_max; 3328 } 3329 3330 int hclge_rss_init_hw(struct hclge_dev *hdev) 3331 { 3332 struct hclge_vport *vport = hdev->vport; 3333 u8 *rss_indir = vport[0].rss_indirection_tbl; 3334 u16 rss_size = vport[0].alloc_rss_size; 3335 u8 *key = vport[0].rss_hash_key; 3336 u8 hfunc = vport[0].rss_algo; 3337 u16 tc_offset[HCLGE_MAX_TC_NUM]; 3338 u16 tc_valid[HCLGE_MAX_TC_NUM]; 3339 u16 tc_size[HCLGE_MAX_TC_NUM]; 3340 u16 roundup_size; 3341 int i, ret; 3342 3343 ret = hclge_set_rss_indir_table(hdev, rss_indir); 3344 if (ret) 3345 return ret; 3346 3347 ret = hclge_set_rss_algo_key(hdev, hfunc, key); 3348 if (ret) 3349 return ret; 3350 3351 ret = hclge_set_rss_input_tuple(hdev); 3352 if (ret) 3353 return ret; 3354 3355 /* Each TC has the same queue size, and the tc_size set to hardware is 3356 * the log2 of the roundup power of two of rss_size; the actual queue 3357 * size is limited by the indirection table.
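* E.g. with rss_size = 24, roundup_pow_of_two(24) = 32 and tc_size becomes
* ilog2(32) = 5, while tc_offset[i] = 24 * i for each enabled TC.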
3358 */ 3359 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { 3360 dev_err(&hdev->pdev->dev, 3361 "Configure rss tc size failed, invalid TC_SIZE = %d\n", 3362 rss_size); 3363 return -EINVAL; 3364 } 3365 3366 roundup_size = roundup_pow_of_two(rss_size); 3367 roundup_size = ilog2(roundup_size); 3368 3369 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 3370 tc_valid[i] = 0; 3371 3372 if (!(hdev->hw_tc_map & BIT(i))) 3373 continue; 3374 3375 tc_valid[i] = 1; 3376 tc_size[i] = roundup_size; 3377 tc_offset[i] = rss_size * i; 3378 } 3379 3380 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 3381 } 3382 3383 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) 3384 { 3385 struct hclge_vport *vport = hdev->vport; 3386 int i, j; 3387 3388 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { 3389 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3390 vport[j].rss_indirection_tbl[i] = 3391 i % vport[j].alloc_rss_size; 3392 } 3393 } 3394 3395 static void hclge_rss_init_cfg(struct hclge_dev *hdev) 3396 { 3397 struct hclge_vport *vport = hdev->vport; 3398 int i; 3399 3400 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 3401 vport[i].rss_tuple_sets.ipv4_tcp_en = 3402 HCLGE_RSS_INPUT_TUPLE_OTHER; 3403 vport[i].rss_tuple_sets.ipv4_udp_en = 3404 HCLGE_RSS_INPUT_TUPLE_OTHER; 3405 vport[i].rss_tuple_sets.ipv4_sctp_en = 3406 HCLGE_RSS_INPUT_TUPLE_SCTP; 3407 vport[i].rss_tuple_sets.ipv4_fragment_en = 3408 HCLGE_RSS_INPUT_TUPLE_OTHER; 3409 vport[i].rss_tuple_sets.ipv6_tcp_en = 3410 HCLGE_RSS_INPUT_TUPLE_OTHER; 3411 vport[i].rss_tuple_sets.ipv6_udp_en = 3412 HCLGE_RSS_INPUT_TUPLE_OTHER; 3413 vport[i].rss_tuple_sets.ipv6_sctp_en = 3414 HCLGE_RSS_INPUT_TUPLE_SCTP; 3415 vport[i].rss_tuple_sets.ipv6_fragment_en = 3416 HCLGE_RSS_INPUT_TUPLE_OTHER; 3417 3418 vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 3419 3420 netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE); 3421 } 3422 3423 hclge_rss_indir_init_cfg(hdev); 3424 } 3425 3426 int hclge_bind_ring_with_vector(struct hclge_vport *vport, 3427 int vector_id, bool en, 3428 struct hnae3_ring_chain_node *ring_chain) 3429 { 3430 struct hclge_dev *hdev = vport->back; 3431 struct hnae3_ring_chain_node *node; 3432 struct hclge_desc desc; 3433 struct hclge_ctrl_vector_chain_cmd *req 3434 = (struct hclge_ctrl_vector_chain_cmd *)desc.data; 3435 enum hclge_cmd_status status; 3436 enum hclge_opcode_type op; 3437 u16 tqp_type_and_id; 3438 int i; 3439 3440 op = en ? 
HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; 3441 hclge_cmd_setup_basic_desc(&desc, op, false); 3442 req->int_vector_id = vector_id; 3443 3444 i = 0; 3445 for (node = ring_chain; node; node = node->next) { 3446 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); 3447 hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, 3448 HCLGE_INT_TYPE_S, 3449 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); 3450 hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, 3451 HCLGE_TQP_ID_S, node->tqp_index); 3452 hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, 3453 HCLGE_INT_GL_IDX_S, 3454 hnae_get_field(node->int_gl_idx, 3455 HNAE3_RING_GL_IDX_M, 3456 HNAE3_RING_GL_IDX_S)); 3457 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); 3458 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { 3459 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; 3460 req->vfid = vport->vport_id; 3461 3462 status = hclge_cmd_send(&hdev->hw, &desc, 1); 3463 if (status) { 3464 dev_err(&hdev->pdev->dev, 3465 "Map TQP fail, status is %d.\n", 3466 status); 3467 return -EIO; 3468 } 3469 i = 0; 3470 3471 hclge_cmd_setup_basic_desc(&desc, 3472 op, 3473 false); 3474 req->int_vector_id = vector_id; 3475 } 3476 } 3477 3478 if (i > 0) { 3479 req->int_cause_num = i; 3480 req->vfid = vport->vport_id; 3481 status = hclge_cmd_send(&hdev->hw, &desc, 1); 3482 if (status) { 3483 dev_err(&hdev->pdev->dev, 3484 "Map TQP fail, status is %d.\n", status); 3485 return -EIO; 3486 } 3487 } 3488 3489 return 0; 3490 } 3491 3492 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, 3493 int vector, 3494 struct hnae3_ring_chain_node *ring_chain) 3495 { 3496 struct hclge_vport *vport = hclge_get_vport(handle); 3497 struct hclge_dev *hdev = vport->back; 3498 int vector_id; 3499 3500 vector_id = hclge_get_vector_index(hdev, vector); 3501 if (vector_id < 0) { 3502 dev_err(&hdev->pdev->dev, 3503 "Get vector index fail. vector_id =%d\n", vector_id); 3504 return vector_id; 3505 } 3506 3507 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); 3508 } 3509 3510 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, 3511 int vector, 3512 struct hnae3_ring_chain_node *ring_chain) 3513 { 3514 struct hclge_vport *vport = hclge_get_vport(handle); 3515 struct hclge_dev *hdev = vport->back; 3516 int vector_id, ret; 3517 3518 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 3519 return 0; 3520 3521 vector_id = hclge_get_vector_index(hdev, vector); 3522 if (vector_id < 0) { 3523 dev_err(&handle->pdev->dev, 3524 "Get vector index fail. ret =%d\n", vector_id); 3525 return vector_id; 3526 } 3527 3528 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); 3529 if (ret) 3530 dev_err(&handle->pdev->dev, 3531 "Unmap ring from vector fail. vectorid=%d, ret =%d\n", 3532 vector_id, 3533 ret); 3534 3535 return ret; 3536 } 3537 3538 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, 3539 struct hclge_promisc_param *param) 3540 { 3541 struct hclge_promisc_cfg_cmd *req; 3542 struct hclge_desc desc; 3543 int ret; 3544 3545 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); 3546 3547 req = (struct hclge_promisc_cfg_cmd *)desc.data; 3548 req->vf_id = param->vf_id; 3549 3550 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on 3551 * pdev revision(0x20), new revision support them. The 3552 * value of this two fields will not return error when driver 3553 * send command to fireware in revision(0x20). 
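* (In other words, setting both bits unconditionally is harmless; the old
* revision accepts the command and simply does not act on them.)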
3554 */ 3555 req->flag = (param->enable << HCLGE_PROMISC_EN_B) | 3556 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; 3557 3558 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3559 if (ret) { 3560 dev_err(&hdev->pdev->dev, 3561 "Set promisc mode fail, status is %d.\n", ret); 3562 return ret; 3563 } 3564 return 0; 3565 } 3566 3567 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, 3568 bool en_mc, bool en_bc, int vport_id) 3569 { 3570 if (!param) 3571 return; 3572 3573 memset(param, 0, sizeof(struct hclge_promisc_param)); 3574 if (en_uc) 3575 param->enable = HCLGE_PROMISC_EN_UC; 3576 if (en_mc) 3577 param->enable |= HCLGE_PROMISC_EN_MC; 3578 if (en_bc) 3579 param->enable |= HCLGE_PROMISC_EN_BC; 3580 param->vf_id = vport_id; 3581 } 3582 3583 static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en) 3584 { 3585 struct hclge_vport *vport = hclge_get_vport(handle); 3586 struct hclge_dev *hdev = vport->back; 3587 struct hclge_promisc_param param; 3588 3589 hclge_promisc_param_init(¶m, en, en, true, vport->vport_id); 3590 hclge_cmd_set_promisc_mode(hdev, ¶m); 3591 } 3592 3593 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) 3594 { 3595 struct hclge_desc desc; 3596 struct hclge_config_mac_mode_cmd *req = 3597 (struct hclge_config_mac_mode_cmd *)desc.data; 3598 u32 loop_en = 0; 3599 int ret; 3600 3601 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); 3602 hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); 3603 hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); 3604 hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); 3605 hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); 3606 hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); 3607 hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); 3608 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); 3609 hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); 3610 hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); 3611 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); 3612 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); 3613 hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); 3614 hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); 3615 hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); 3616 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 3617 3618 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3619 if (ret) 3620 dev_err(&hdev->pdev->dev, 3621 "mac enable fail, ret =%d.\n", ret); 3622 } 3623 3624 static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) 3625 { 3626 struct hclge_config_mac_mode_cmd *req; 3627 struct hclge_desc desc; 3628 u32 loop_en; 3629 int ret; 3630 3631 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; 3632 /* 1 Read out the MAC mode config at first */ 3633 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 3634 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3635 if (ret) { 3636 dev_err(&hdev->pdev->dev, 3637 "mac loopback get fail, ret =%d.\n", ret); 3638 return ret; 3639 } 3640 3641 /* 2 Then setup the loopback flag */ 3642 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); 3643 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 
1 : 0); 3644 3645 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 3646 3647 /* 3 Config mac work mode with loopback flag 3648 * and its original configure parameters 3649 */ 3650 hclge_cmd_reuse_desc(&desc, false); 3651 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3652 if (ret) 3653 dev_err(&hdev->pdev->dev, 3654 "mac loopback set fail, ret =%d.\n", ret); 3655 return ret; 3656 } 3657 3658 static int hclge_set_loopback(struct hnae3_handle *handle, 3659 enum hnae3_loop loop_mode, bool en) 3660 { 3661 struct hclge_vport *vport = hclge_get_vport(handle); 3662 struct hclge_dev *hdev = vport->back; 3663 int ret; 3664 3665 switch (loop_mode) { 3666 case HNAE3_MAC_INTER_LOOP_MAC: 3667 ret = hclge_set_mac_loopback(hdev, en); 3668 break; 3669 default: 3670 ret = -ENOTSUPP; 3671 dev_err(&hdev->pdev->dev, 3672 "loop_mode %d is not supported\n", loop_mode); 3673 break; 3674 } 3675 3676 return ret; 3677 } 3678 3679 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, 3680 int stream_id, bool enable) 3681 { 3682 struct hclge_desc desc; 3683 struct hclge_cfg_com_tqp_queue_cmd *req = 3684 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; 3685 int ret; 3686 3687 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); 3688 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); 3689 req->stream_id = cpu_to_le16(stream_id); 3690 req->enable |= enable << HCLGE_TQP_ENABLE_B; 3691 3692 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3693 if (ret) 3694 dev_err(&hdev->pdev->dev, 3695 "Tqp enable fail, status =%d.\n", ret); 3696 return ret; 3697 } 3698 3699 static void hclge_reset_tqp_stats(struct hnae3_handle *handle) 3700 { 3701 struct hclge_vport *vport = hclge_get_vport(handle); 3702 struct hnae3_queue *queue; 3703 struct hclge_tqp *tqp; 3704 int i; 3705 3706 for (i = 0; i < vport->alloc_tqps; i++) { 3707 queue = handle->kinfo.tqp[i]; 3708 tqp = container_of(queue, struct hclge_tqp, q); 3709 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 3710 } 3711 } 3712 3713 static int hclge_ae_start(struct hnae3_handle *handle) 3714 { 3715 struct hclge_vport *vport = hclge_get_vport(handle); 3716 struct hclge_dev *hdev = vport->back; 3717 int i, ret; 3718 3719 for (i = 0; i < vport->alloc_tqps; i++) 3720 hclge_tqp_enable(hdev, i, 0, true); 3721 3722 /* mac enable */ 3723 hclge_cfg_mac_mode(hdev, true); 3724 clear_bit(HCLGE_STATE_DOWN, &hdev->state); 3725 mod_timer(&hdev->service_timer, jiffies + HZ); 3726 hdev->hw.mac.link = 0; 3727 3728 /* reset tqp stats */ 3729 hclge_reset_tqp_stats(handle); 3730 3731 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 3732 return 0; 3733 3734 ret = hclge_mac_start_phy(hdev); 3735 if (ret) 3736 return ret; 3737 3738 return 0; 3739 } 3740 3741 static void hclge_ae_stop(struct hnae3_handle *handle) 3742 { 3743 struct hclge_vport *vport = hclge_get_vport(handle); 3744 struct hclge_dev *hdev = vport->back; 3745 int i; 3746 3747 del_timer_sync(&hdev->service_timer); 3748 cancel_work_sync(&hdev->service_task); 3749 3750 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 3751 return; 3752 3753 for (i = 0; i < vport->alloc_tqps; i++) 3754 hclge_tqp_enable(hdev, i, 0, false); 3755 3756 /* Mac disable */ 3757 hclge_cfg_mac_mode(hdev, false); 3758 3759 hclge_mac_stop_phy(hdev); 3760 3761 /* reset tqp stats */ 3762 hclge_reset_tqp_stats(handle); 3763 del_timer_sync(&hdev->service_timer); 3764 cancel_work_sync(&hdev->service_task); 3765 hclge_update_link_status(hdev); 3766 } 3767 3768 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, 3769 u16 cmdq_resp, 
u8 resp_code, 3770 enum hclge_mac_vlan_tbl_opcode op) 3771 { 3772 struct hclge_dev *hdev = vport->back; 3773 int return_status = -EIO; 3774 3775 if (cmdq_resp) { 3776 dev_err(&hdev->pdev->dev, 3777 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", 3778 cmdq_resp); 3779 return -EIO; 3780 } 3781 3782 if (op == HCLGE_MAC_VLAN_ADD) { 3783 if ((!resp_code) || (resp_code == 1)) { 3784 return_status = 0; 3785 } else if (resp_code == 2) { 3786 return_status = -ENOSPC; 3787 dev_err(&hdev->pdev->dev, 3788 "add mac addr failed for uc_overflow.\n"); 3789 } else if (resp_code == 3) { 3790 return_status = -ENOSPC; 3791 dev_err(&hdev->pdev->dev, 3792 "add mac addr failed for mc_overflow.\n"); 3793 } else { 3794 dev_err(&hdev->pdev->dev, 3795 "add mac addr failed for undefined, code=%d.\n", 3796 resp_code); 3797 } 3798 } else if (op == HCLGE_MAC_VLAN_REMOVE) { 3799 if (!resp_code) { 3800 return_status = 0; 3801 } else if (resp_code == 1) { 3802 return_status = -ENOENT; 3803 dev_dbg(&hdev->pdev->dev, 3804 "remove mac addr failed for miss.\n"); 3805 } else { 3806 dev_err(&hdev->pdev->dev, 3807 "remove mac addr failed for undefined, code=%d.\n", 3808 resp_code); 3809 } 3810 } else if (op == HCLGE_MAC_VLAN_LKUP) { 3811 if (!resp_code) { 3812 return_status = 0; 3813 } else if (resp_code == 1) { 3814 return_status = -ENOENT; 3815 dev_dbg(&hdev->pdev->dev, 3816 "lookup mac addr failed for miss.\n"); 3817 } else { 3818 dev_err(&hdev->pdev->dev, 3819 "lookup mac addr failed for undefined, code=%d.\n", 3820 resp_code); 3821 } 3822 } else { 3823 return_status = -EINVAL; 3824 dev_err(&hdev->pdev->dev, 3825 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", 3826 op); 3827 } 3828 3829 return return_status; 3830 } 3831 3832 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) 3833 { 3834 int word_num; 3835 int bit_num; 3836 3837 if (vfid > 255 || vfid < 0) 3838 return -EIO; 3839 3840 if (vfid >= 0 && vfid <= 191) { 3841 word_num = vfid / 32; 3842 bit_num = vfid % 32; 3843 if (clr) 3844 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 3845 else 3846 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); 3847 } else { 3848 word_num = (vfid - 192) / 32; 3849 bit_num = vfid % 32; 3850 if (clr) 3851 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 3852 else 3853 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); 3854 } 3855 3856 return 0; 3857 } 3858 3859 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) 3860 { 3861 #define HCLGE_DESC_NUMBER 3 3862 #define HCLGE_FUNC_NUMBER_PER_DESC 6 3863 int i, j; 3864 3865 for (i = 0; i < HCLGE_DESC_NUMBER; i++) 3866 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) 3867 if (desc[i].data[j]) 3868 return false; 3869 3870 return true; 3871 } 3872 3873 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, 3874 const u8 *addr) 3875 { 3876 const unsigned char *mac_addr = addr; 3877 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | 3878 (mac_addr[0]) | (mac_addr[1] << 8); 3879 u32 low_val = mac_addr[4] | (mac_addr[5] << 8); 3880 3881 new_req->mac_addr_hi32 = cpu_to_le32(high_val); 3882 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); 3883 } 3884 3885 static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, 3886 const u8 *addr) 3887 { 3888 u16 high_val = addr[1] | (addr[0] << 8); 3889 struct hclge_dev *hdev = vport->back; 3890 u32 rsh = 4 - hdev->mta_mac_sel_type; 3891 u16 ret_val = (high_val >> rsh) & 0xfff; 3892 3893 return ret_val; 3894 } 3895 3896 static int 
hclge_set_mta_filter_mode(struct hclge_dev *hdev, 3897 enum hclge_mta_dmac_sel_type mta_mac_sel, 3898 bool enable) 3899 { 3900 struct hclge_mta_filter_mode_cmd *req; 3901 struct hclge_desc desc; 3902 int ret; 3903 3904 req = (struct hclge_mta_filter_mode_cmd *)desc.data; 3905 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); 3906 3907 hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, 3908 enable); 3909 hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, 3910 HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); 3911 3912 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3913 if (ret) { 3914 dev_err(&hdev->pdev->dev, 3915 "Config mat filter mode failed for cmd_send, ret =%d.\n", 3916 ret); 3917 return ret; 3918 } 3919 3920 return 0; 3921 } 3922 3923 int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, 3924 u8 func_id, 3925 bool enable) 3926 { 3927 struct hclge_cfg_func_mta_filter_cmd *req; 3928 struct hclge_desc desc; 3929 int ret; 3930 3931 req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; 3932 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); 3933 3934 hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, 3935 enable); 3936 req->function_id = func_id; 3937 3938 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3939 if (ret) { 3940 dev_err(&hdev->pdev->dev, 3941 "Config func_id enable failed for cmd_send, ret =%d.\n", 3942 ret); 3943 return ret; 3944 } 3945 3946 return 0; 3947 } 3948 3949 static int hclge_set_mta_table_item(struct hclge_vport *vport, 3950 u16 idx, 3951 bool enable) 3952 { 3953 struct hclge_dev *hdev = vport->back; 3954 struct hclge_cfg_func_mta_item_cmd *req; 3955 struct hclge_desc desc; 3956 u16 item_idx = 0; 3957 int ret; 3958 3959 req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; 3960 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); 3961 hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); 3962 3963 hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, 3964 HCLGE_CFG_MTA_ITEM_IDX_S, idx); 3965 req->item_idx = cpu_to_le16(item_idx); 3966 3967 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3968 if (ret) { 3969 dev_err(&hdev->pdev->dev, 3970 "Config mta table item failed for cmd_send, ret =%d.\n", 3971 ret); 3972 return ret; 3973 } 3974 3975 return 0; 3976 } 3977 3978 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, 3979 struct hclge_mac_vlan_tbl_entry_cmd *req) 3980 { 3981 struct hclge_dev *hdev = vport->back; 3982 struct hclge_desc desc; 3983 u8 resp_code; 3984 u16 retval; 3985 int ret; 3986 3987 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); 3988 3989 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 3990 3991 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3992 if (ret) { 3993 dev_err(&hdev->pdev->dev, 3994 "del mac addr failed for cmd_send, ret =%d.\n", 3995 ret); 3996 return ret; 3997 } 3998 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 3999 retval = le16_to_cpu(desc.retval); 4000 4001 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 4002 HCLGE_MAC_VLAN_REMOVE); 4003 } 4004 4005 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, 4006 struct hclge_mac_vlan_tbl_entry_cmd *req, 4007 struct hclge_desc *desc, 4008 bool is_mc) 4009 { 4010 struct hclge_dev *hdev = vport->back; 4011 u8 resp_code; 4012 u16 retval; 4013 int ret; 4014 4015 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); 4016 if (is_mc) { 4017 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4018 memcpy(desc[0].data, 4019 req, 4020 sizeof(struct 
hclge_mac_vlan_tbl_entry_cmd)); 4021 hclge_cmd_setup_basic_desc(&desc[1], 4022 HCLGE_OPC_MAC_VLAN_ADD, 4023 true); 4024 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4025 hclge_cmd_setup_basic_desc(&desc[2], 4026 HCLGE_OPC_MAC_VLAN_ADD, 4027 true); 4028 ret = hclge_cmd_send(&hdev->hw, desc, 3); 4029 } else { 4030 memcpy(desc[0].data, 4031 req, 4032 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4033 ret = hclge_cmd_send(&hdev->hw, desc, 1); 4034 } 4035 if (ret) { 4036 dev_err(&hdev->pdev->dev, 4037 "lookup mac addr failed for cmd_send, ret =%d.\n", 4038 ret); 4039 return ret; 4040 } 4041 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; 4042 retval = le16_to_cpu(desc[0].retval); 4043 4044 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 4045 HCLGE_MAC_VLAN_LKUP); 4046 } 4047 4048 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, 4049 struct hclge_mac_vlan_tbl_entry_cmd *req, 4050 struct hclge_desc *mc_desc) 4051 { 4052 struct hclge_dev *hdev = vport->back; 4053 int cfg_status; 4054 u8 resp_code; 4055 u16 retval; 4056 int ret; 4057 4058 if (!mc_desc) { 4059 struct hclge_desc desc; 4060 4061 hclge_cmd_setup_basic_desc(&desc, 4062 HCLGE_OPC_MAC_VLAN_ADD, 4063 false); 4064 memcpy(desc.data, req, 4065 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4066 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4067 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 4068 retval = le16_to_cpu(desc.retval); 4069 4070 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 4071 resp_code, 4072 HCLGE_MAC_VLAN_ADD); 4073 } else { 4074 hclge_cmd_reuse_desc(&mc_desc[0], false); 4075 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4076 hclge_cmd_reuse_desc(&mc_desc[1], false); 4077 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4078 hclge_cmd_reuse_desc(&mc_desc[2], false); 4079 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); 4080 memcpy(mc_desc[0].data, req, 4081 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4082 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); 4083 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; 4084 retval = le16_to_cpu(mc_desc[0].retval); 4085 4086 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 4087 resp_code, 4088 HCLGE_MAC_VLAN_ADD); 4089 } 4090 4091 if (ret) { 4092 dev_err(&hdev->pdev->dev, 4093 "add mac addr failed for cmd_send, ret =%d.\n", 4094 ret); 4095 return ret; 4096 } 4097 4098 return cfg_status; 4099 } 4100 4101 static int hclge_add_uc_addr(struct hnae3_handle *handle, 4102 const unsigned char *addr) 4103 { 4104 struct hclge_vport *vport = hclge_get_vport(handle); 4105 4106 return hclge_add_uc_addr_common(vport, addr); 4107 } 4108 4109 int hclge_add_uc_addr_common(struct hclge_vport *vport, 4110 const unsigned char *addr) 4111 { 4112 struct hclge_dev *hdev = vport->back; 4113 struct hclge_mac_vlan_tbl_entry_cmd req; 4114 struct hclge_desc desc; 4115 u16 egress_port = 0; 4116 int ret; 4117 4118 /* mac addr check */ 4119 if (is_zero_ether_addr(addr) || 4120 is_broadcast_ether_addr(addr) || 4121 is_multicast_ether_addr(addr)) { 4122 dev_err(&hdev->pdev->dev, 4123 "Set_uc mac err! invalid mac:%pM. 
is_zero:%d,is_br=%d,is_mul=%d\n", 4124 addr, 4125 is_zero_ether_addr(addr), 4126 is_broadcast_ether_addr(addr), 4127 is_multicast_ether_addr(addr)); 4128 return -EINVAL; 4129 } 4130 4131 memset(&req, 0, sizeof(req)); 4132 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4133 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4134 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0); 4135 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4136 4137 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0); 4138 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0); 4139 hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, 4140 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); 4141 hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M, 4142 HCLGE_MAC_EPORT_PFID_S, 0); 4143 4144 req.egress_port = cpu_to_le16(egress_port); 4145 4146 hclge_prepare_mac_addr(&req, addr); 4147 4148 /* Lookup the mac address in the mac_vlan table, and add 4149 * it if the entry is inexistent. Repeated unicast entry 4150 * is not allowed in the mac vlan table. 4151 */ 4152 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); 4153 if (ret == -ENOENT) 4154 return hclge_add_mac_vlan_tbl(vport, &req, NULL); 4155 4156 /* check if we just hit the duplicate */ 4157 if (!ret) 4158 ret = -EINVAL; 4159 4160 dev_err(&hdev->pdev->dev, 4161 "PF failed to add unicast entry(%pM) in the MAC table\n", 4162 addr); 4163 4164 return ret; 4165 } 4166 4167 static int hclge_rm_uc_addr(struct hnae3_handle *handle, 4168 const unsigned char *addr) 4169 { 4170 struct hclge_vport *vport = hclge_get_vport(handle); 4171 4172 return hclge_rm_uc_addr_common(vport, addr); 4173 } 4174 4175 int hclge_rm_uc_addr_common(struct hclge_vport *vport, 4176 const unsigned char *addr) 4177 { 4178 struct hclge_dev *hdev = vport->back; 4179 struct hclge_mac_vlan_tbl_entry_cmd req; 4180 int ret; 4181 4182 /* mac addr check */ 4183 if (is_zero_ether_addr(addr) || 4184 is_broadcast_ether_addr(addr) || 4185 is_multicast_ether_addr(addr)) { 4186 dev_dbg(&hdev->pdev->dev, 4187 "Remove mac err! invalid mac:%pM.\n", 4188 addr); 4189 return -EINVAL; 4190 } 4191 4192 memset(&req, 0, sizeof(req)); 4193 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4194 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4195 hclge_prepare_mac_addr(&req, addr); 4196 ret = hclge_remove_mac_vlan_tbl(vport, &req); 4197 4198 return ret; 4199 } 4200 4201 static int hclge_add_mc_addr(struct hnae3_handle *handle, 4202 const unsigned char *addr) 4203 { 4204 struct hclge_vport *vport = hclge_get_vport(handle); 4205 4206 return hclge_add_mc_addr_common(vport, addr); 4207 } 4208 4209 int hclge_add_mc_addr_common(struct hclge_vport *vport, 4210 const unsigned char *addr) 4211 { 4212 struct hclge_dev *hdev = vport->back; 4213 struct hclge_mac_vlan_tbl_entry_cmd req; 4214 struct hclge_desc desc[3]; 4215 u16 tbl_idx; 4216 int status; 4217 4218 /* mac addr check */ 4219 if (!is_multicast_ether_addr(addr)) { 4220 dev_err(&hdev->pdev->dev, 4221 "Add mc mac err! 
		invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, update the VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	}

	/* Set MTA table for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, true);

	return status;
}

static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];
	u16 tbl_idx;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, true);

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfids are zero, update the vfids */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	} else {
		/* This mac addr does not exist, can't delete it */
		dev_err(&hdev->pdev->dev,
			"Rm multicast mac addr failed, ret = %d.\n",
			status);
		return -EIO;
	}

	/* Set MTA table for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, false);

	return status;
}

static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%d.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}

static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}
4391 4392 return 0; 4393 } 4394 4395 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) 4396 { 4397 struct hclge_vport *vport = hclge_get_vport(handle); 4398 struct hclge_dev *hdev = vport->back; 4399 4400 ether_addr_copy(p, hdev->hw.mac.mac_addr); 4401 } 4402 4403 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, 4404 bool is_first) 4405 { 4406 const unsigned char *new_addr = (const unsigned char *)p; 4407 struct hclge_vport *vport = hclge_get_vport(handle); 4408 struct hclge_dev *hdev = vport->back; 4409 int ret; 4410 4411 /* mac addr check */ 4412 if (is_zero_ether_addr(new_addr) || 4413 is_broadcast_ether_addr(new_addr) || 4414 is_multicast_ether_addr(new_addr)) { 4415 dev_err(&hdev->pdev->dev, 4416 "Change uc mac err! invalid mac:%p.\n", 4417 new_addr); 4418 return -EINVAL; 4419 } 4420 4421 if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) 4422 dev_warn(&hdev->pdev->dev, 4423 "remove old uc mac address fail.\n"); 4424 4425 ret = hclge_add_uc_addr(handle, new_addr); 4426 if (ret) { 4427 dev_err(&hdev->pdev->dev, 4428 "add uc mac address fail, ret =%d.\n", 4429 ret); 4430 4431 if (!is_first && 4432 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) 4433 dev_err(&hdev->pdev->dev, 4434 "restore uc mac address fail.\n"); 4435 4436 return -EIO; 4437 } 4438 4439 ret = hclge_pause_addr_cfg(hdev, new_addr); 4440 if (ret) { 4441 dev_err(&hdev->pdev->dev, 4442 "configure mac pause address fail, ret =%d.\n", 4443 ret); 4444 return -EIO; 4445 } 4446 4447 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); 4448 4449 return 0; 4450 } 4451 4452 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, 4453 bool filter_en) 4454 { 4455 struct hclge_vlan_filter_ctrl_cmd *req; 4456 struct hclge_desc desc; 4457 int ret; 4458 4459 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); 4460 4461 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; 4462 req->vlan_type = vlan_type; 4463 req->vlan_fe = filter_en; 4464 4465 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4466 if (ret) { 4467 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", 4468 ret); 4469 return ret; 4470 } 4471 4472 return 0; 4473 } 4474 4475 #define HCLGE_FILTER_TYPE_VF 0 4476 #define HCLGE_FILTER_TYPE_PORT 1 4477 4478 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 4479 { 4480 struct hclge_vport *vport = hclge_get_vport(handle); 4481 struct hclge_dev *hdev = vport->back; 4482 4483 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable); 4484 } 4485 4486 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, 4487 bool is_kill, u16 vlan, u8 qos, 4488 __be16 proto) 4489 { 4490 #define HCLGE_MAX_VF_BYTES 16 4491 struct hclge_vlan_filter_vf_cfg_cmd *req0; 4492 struct hclge_vlan_filter_vf_cfg_cmd *req1; 4493 struct hclge_desc desc[2]; 4494 u8 vf_byte_val; 4495 u8 vf_byte_off; 4496 int ret; 4497 4498 hclge_cmd_setup_basic_desc(&desc[0], 4499 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 4500 hclge_cmd_setup_basic_desc(&desc[1], 4501 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 4502 4503 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4504 4505 vf_byte_off = vfid / 8; 4506 vf_byte_val = 1 << (vfid % 8); 4507 4508 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; 4509 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; 4510 4511 req0->vlan_id = cpu_to_le16(vlan); 4512 req0->vlan_cfg = is_kill; 4513 4514 if (vf_byte_off < HCLGE_MAX_VF_BYTES) 4515 req0->vf_bitmap[vf_byte_off] = vf_byte_val; 4516 else 4517 
req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; 4518 4519 ret = hclge_cmd_send(&hdev->hw, desc, 2); 4520 if (ret) { 4521 dev_err(&hdev->pdev->dev, 4522 "Send vf vlan command fail, ret =%d.\n", 4523 ret); 4524 return ret; 4525 } 4526 4527 if (!is_kill) { 4528 if (!req0->resp_code || req0->resp_code == 1) 4529 return 0; 4530 4531 dev_err(&hdev->pdev->dev, 4532 "Add vf vlan filter fail, ret =%d.\n", 4533 req0->resp_code); 4534 } else { 4535 if (!req0->resp_code) 4536 return 0; 4537 4538 dev_err(&hdev->pdev->dev, 4539 "Kill vf vlan filter fail, ret =%d.\n", 4540 req0->resp_code); 4541 } 4542 4543 return -EIO; 4544 } 4545 4546 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, 4547 u16 vlan_id, bool is_kill) 4548 { 4549 struct hclge_vlan_filter_pf_cfg_cmd *req; 4550 struct hclge_desc desc; 4551 u8 vlan_offset_byte_val; 4552 u8 vlan_offset_byte; 4553 u8 vlan_offset_160; 4554 int ret; 4555 4556 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); 4557 4558 vlan_offset_160 = vlan_id / 160; 4559 vlan_offset_byte = (vlan_id % 160) / 8; 4560 vlan_offset_byte_val = 1 << (vlan_id % 8); 4561 4562 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; 4563 req->vlan_offset = vlan_offset_160; 4564 req->vlan_cfg = is_kill; 4565 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 4566 4567 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4568 if (ret) 4569 dev_err(&hdev->pdev->dev, 4570 "port vlan command, send fail, ret =%d.\n", ret); 4571 return ret; 4572 } 4573 4574 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, 4575 u16 vport_id, u16 vlan_id, u8 qos, 4576 bool is_kill) 4577 { 4578 u16 vport_idx, vport_num = 0; 4579 int ret; 4580 4581 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, 4582 0, proto); 4583 if (ret) { 4584 dev_err(&hdev->pdev->dev, 4585 "Set %d vport vlan filter config fail, ret =%d.\n", 4586 vport_id, ret); 4587 return ret; 4588 } 4589 4590 /* vlan 0 may be added twice when 8021q module is enabled */ 4591 if (!is_kill && !vlan_id && 4592 test_bit(vport_id, hdev->vlan_table[vlan_id])) 4593 return 0; 4594 4595 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { 4596 dev_err(&hdev->pdev->dev, 4597 "Add port vlan failed, vport %d is already in vlan %d\n", 4598 vport_id, vlan_id); 4599 return -EINVAL; 4600 } 4601 4602 if (is_kill && 4603 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { 4604 dev_err(&hdev->pdev->dev, 4605 "Delete port vlan failed, vport %d is not in vlan %d\n", 4606 vport_id, vlan_id); 4607 return -EINVAL; 4608 } 4609 4610 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID) 4611 vport_num++; 4612 4613 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) 4614 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, 4615 is_kill); 4616 4617 return ret; 4618 } 4619 4620 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, 4621 u16 vlan_id, bool is_kill) 4622 { 4623 struct hclge_vport *vport = hclge_get_vport(handle); 4624 struct hclge_dev *hdev = vport->back; 4625 4626 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, 4627 0, is_kill); 4628 } 4629 4630 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, 4631 u16 vlan, u8 qos, __be16 proto) 4632 { 4633 struct hclge_vport *vport = hclge_get_vport(handle); 4634 struct hclge_dev *hdev = vport->back; 4635 4636 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) 4637 return -EINVAL; 4638 if (proto != 
htons(ETH_P_8021Q)) 4639 return -EPROTONOSUPPORT; 4640 4641 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); 4642 } 4643 4644 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) 4645 { 4646 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; 4647 struct hclge_vport_vtag_tx_cfg_cmd *req; 4648 struct hclge_dev *hdev = vport->back; 4649 struct hclge_desc desc; 4650 int status; 4651 4652 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); 4653 4654 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; 4655 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); 4656 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); 4657 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, 4658 vcfg->accept_tag1 ? 1 : 0); 4659 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, 4660 vcfg->accept_untag1 ? 1 : 0); 4661 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, 4662 vcfg->accept_tag2 ? 1 : 0); 4663 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, 4664 vcfg->accept_untag2 ? 1 : 0); 4665 hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, 4666 vcfg->insert_tag1_en ? 1 : 0); 4667 hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, 4668 vcfg->insert_tag2_en ? 1 : 0); 4669 hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); 4670 4671 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 4672 req->vf_bitmap[req->vf_offset] = 4673 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 4674 4675 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4676 if (status) 4677 dev_err(&hdev->pdev->dev, 4678 "Send port txvlan cfg command fail, ret =%d\n", 4679 status); 4680 4681 return status; 4682 } 4683 4684 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) 4685 { 4686 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; 4687 struct hclge_vport_vtag_rx_cfg_cmd *req; 4688 struct hclge_dev *hdev = vport->back; 4689 struct hclge_desc desc; 4690 int status; 4691 4692 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); 4693 4694 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; 4695 hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, 4696 vcfg->strip_tag1_en ? 1 : 0); 4697 hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, 4698 vcfg->strip_tag2_en ? 1 : 0); 4699 hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, 4700 vcfg->vlan1_vlan_prionly ? 1 : 0); 4701 hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, 4702 vcfg->vlan2_vlan_prionly ? 
		     1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle;
	struct hclge_vport *vport;
	int ret;
	int i;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
	if (ret)
		return ret;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
	if (ret)
		return ret;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.accept_untag1 = true;

		/* accept_tag2 and accept_untag2 are not supported on
		 * pdev revision(0x20); newer revisions support them. The
		 * value of these two fields will not cause an error when the
		 * driver sends the command to the firmware on revision(0x20).
		 * These two fields cannot be configured by the user.
4796 */ 4797 vport->txvlan_cfg.accept_tag2 = true; 4798 vport->txvlan_cfg.accept_untag2 = true; 4799 4800 vport->txvlan_cfg.insert_tag1_en = false; 4801 vport->txvlan_cfg.insert_tag2_en = false; 4802 vport->txvlan_cfg.default_tag1 = 0; 4803 vport->txvlan_cfg.default_tag2 = 0; 4804 4805 ret = hclge_set_vlan_tx_offload_cfg(vport); 4806 if (ret) 4807 return ret; 4808 4809 vport->rxvlan_cfg.strip_tag1_en = false; 4810 vport->rxvlan_cfg.strip_tag2_en = true; 4811 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 4812 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 4813 4814 ret = hclge_set_vlan_rx_offload_cfg(vport); 4815 if (ret) 4816 return ret; 4817 } 4818 4819 handle = &hdev->vport[0].nic; 4820 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); 4821 } 4822 4823 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 4824 { 4825 struct hclge_vport *vport = hclge_get_vport(handle); 4826 4827 vport->rxvlan_cfg.strip_tag1_en = false; 4828 vport->rxvlan_cfg.strip_tag2_en = enable; 4829 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 4830 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 4831 4832 return hclge_set_vlan_rx_offload_cfg(vport); 4833 } 4834 4835 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) 4836 { 4837 struct hclge_config_max_frm_size_cmd *req; 4838 struct hclge_desc desc; 4839 int max_frm_size; 4840 int ret; 4841 4842 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 4843 4844 if (max_frm_size < HCLGE_MAC_MIN_FRAME || 4845 max_frm_size > HCLGE_MAC_MAX_FRAME) 4846 return -EINVAL; 4847 4848 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); 4849 4850 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); 4851 4852 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 4853 req->max_frm_size = cpu_to_le16(max_frm_size); 4854 4855 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4856 if (ret) { 4857 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); 4858 return ret; 4859 } 4860 4861 hdev->mps = max_frm_size; 4862 4863 return 0; 4864 } 4865 4866 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) 4867 { 4868 struct hclge_vport *vport = hclge_get_vport(handle); 4869 struct hclge_dev *hdev = vport->back; 4870 int ret; 4871 4872 ret = hclge_set_mac_mtu(hdev, new_mtu); 4873 if (ret) { 4874 dev_err(&hdev->pdev->dev, 4875 "Change mtu fail, ret =%d\n", ret); 4876 return ret; 4877 } 4878 4879 ret = hclge_buffer_alloc(hdev); 4880 if (ret) 4881 dev_err(&hdev->pdev->dev, 4882 "Allocate buffer fail, ret =%d\n", ret); 4883 4884 return ret; 4885 } 4886 4887 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, 4888 bool enable) 4889 { 4890 struct hclge_reset_tqp_queue_cmd *req; 4891 struct hclge_desc desc; 4892 int ret; 4893 4894 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); 4895 4896 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 4897 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 4898 hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); 4899 4900 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4901 if (ret) { 4902 dev_err(&hdev->pdev->dev, 4903 "Send tqp reset cmd error, status =%d\n", ret); 4904 return ret; 4905 } 4906 4907 return 0; 4908 } 4909 4910 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) 4911 { 4912 struct hclge_reset_tqp_queue_cmd *req; 4913 struct hclge_desc desc; 4914 int ret; 4915 4916 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); 4917 4918 req = (struct hclge_reset_tqp_queue_cmd 
*)desc.data; 4919 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 4920 4921 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4922 if (ret) { 4923 dev_err(&hdev->pdev->dev, 4924 "Get reset status error, status =%d\n", ret); 4925 return ret; 4926 } 4927 4928 return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); 4929 } 4930 4931 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, 4932 u16 queue_id) 4933 { 4934 struct hnae3_queue *queue; 4935 struct hclge_tqp *tqp; 4936 4937 queue = handle->kinfo.tqp[queue_id]; 4938 tqp = container_of(queue, struct hclge_tqp, q); 4939 4940 return tqp->index; 4941 } 4942 4943 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 4944 { 4945 struct hclge_vport *vport = hclge_get_vport(handle); 4946 struct hclge_dev *hdev = vport->back; 4947 int reset_try_times = 0; 4948 int reset_status; 4949 u16 queue_gid; 4950 int ret; 4951 4952 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 4953 return; 4954 4955 queue_gid = hclge_covert_handle_qid_global(handle, queue_id); 4956 4957 ret = hclge_tqp_enable(hdev, queue_id, 0, false); 4958 if (ret) { 4959 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); 4960 return; 4961 } 4962 4963 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 4964 if (ret) { 4965 dev_warn(&hdev->pdev->dev, 4966 "Send reset tqp cmd fail, ret = %d\n", ret); 4967 return; 4968 } 4969 4970 reset_try_times = 0; 4971 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 4972 /* Wait for tqp hw reset */ 4973 msleep(20); 4974 reset_status = hclge_get_reset_status(hdev, queue_gid); 4975 if (reset_status) 4976 break; 4977 } 4978 4979 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 4980 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); 4981 return; 4982 } 4983 4984 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 4985 if (ret) { 4986 dev_warn(&hdev->pdev->dev, 4987 "Deassert the soft reset fail, ret = %d\n", ret); 4988 return; 4989 } 4990 } 4991 4992 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) 4993 { 4994 struct hclge_dev *hdev = vport->back; 4995 int reset_try_times = 0; 4996 int reset_status; 4997 u16 queue_gid; 4998 int ret; 4999 5000 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); 5001 5002 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 5003 if (ret) { 5004 dev_warn(&hdev->pdev->dev, 5005 "Send reset tqp cmd fail, ret = %d\n", ret); 5006 return; 5007 } 5008 5009 reset_try_times = 0; 5010 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 5011 /* Wait for tqp hw reset */ 5012 msleep(20); 5013 reset_status = hclge_get_reset_status(hdev, queue_gid); 5014 if (reset_status) 5015 break; 5016 } 5017 5018 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 5019 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); 5020 return; 5021 } 5022 5023 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 5024 if (ret) 5025 dev_warn(&hdev->pdev->dev, 5026 "Deassert the soft reset fail, ret = %d\n", ret); 5027 } 5028 5029 static u32 hclge_get_fw_version(struct hnae3_handle *handle) 5030 { 5031 struct hclge_vport *vport = hclge_get_vport(handle); 5032 struct hclge_dev *hdev = vport->back; 5033 5034 return hdev->fw_version; 5035 } 5036 5037 static void hclge_get_flowctrl_adv(struct hnae3_handle *handle, 5038 u32 *flowctrl_adv) 5039 { 5040 struct hclge_vport *vport = hclge_get_vport(handle); 5041 struct hclge_dev *hdev = vport->back; 5042 struct phy_device *phydev = hdev->hw.mac.phydev; 5043 5044 if (!phydev) 5045 return; 5046 5047 *flowctrl_adv |= 
(phydev->advertising & ADVERTISED_Pause) | 5048 (phydev->advertising & ADVERTISED_Asym_Pause); 5049 } 5050 5051 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 5052 { 5053 struct phy_device *phydev = hdev->hw.mac.phydev; 5054 5055 if (!phydev) 5056 return; 5057 5058 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); 5059 5060 if (rx_en) 5061 phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; 5062 5063 if (tx_en) 5064 phydev->advertising ^= ADVERTISED_Asym_Pause; 5065 } 5066 5067 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 5068 { 5069 int ret; 5070 5071 if (rx_en && tx_en) 5072 hdev->fc_mode_last_time = HCLGE_FC_FULL; 5073 else if (rx_en && !tx_en) 5074 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; 5075 else if (!rx_en && tx_en) 5076 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; 5077 else 5078 hdev->fc_mode_last_time = HCLGE_FC_NONE; 5079 5080 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) 5081 return 0; 5082 5083 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); 5084 if (ret) { 5085 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", 5086 ret); 5087 return ret; 5088 } 5089 5090 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; 5091 5092 return 0; 5093 } 5094 5095 int hclge_cfg_flowctrl(struct hclge_dev *hdev) 5096 { 5097 struct phy_device *phydev = hdev->hw.mac.phydev; 5098 u16 remote_advertising = 0; 5099 u16 local_advertising = 0; 5100 u32 rx_pause, tx_pause; 5101 u8 flowctl; 5102 5103 if (!phydev->link || !phydev->autoneg) 5104 return 0; 5105 5106 if (phydev->advertising & ADVERTISED_Pause) 5107 local_advertising = ADVERTISE_PAUSE_CAP; 5108 5109 if (phydev->advertising & ADVERTISED_Asym_Pause) 5110 local_advertising |= ADVERTISE_PAUSE_ASYM; 5111 5112 if (phydev->pause) 5113 remote_advertising = LPA_PAUSE_CAP; 5114 5115 if (phydev->asym_pause) 5116 remote_advertising |= LPA_PAUSE_ASYM; 5117 5118 flowctl = mii_resolve_flowctrl_fdx(local_advertising, 5119 remote_advertising); 5120 tx_pause = flowctl & FLOW_CTRL_TX; 5121 rx_pause = flowctl & FLOW_CTRL_RX; 5122 5123 if (phydev->duplex == HCLGE_MAC_HALF) { 5124 tx_pause = 0; 5125 rx_pause = 0; 5126 } 5127 5128 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); 5129 } 5130 5131 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, 5132 u32 *rx_en, u32 *tx_en) 5133 { 5134 struct hclge_vport *vport = hclge_get_vport(handle); 5135 struct hclge_dev *hdev = vport->back; 5136 5137 *auto_neg = hclge_get_autoneg(handle); 5138 5139 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 5140 *rx_en = 0; 5141 *tx_en = 0; 5142 return; 5143 } 5144 5145 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { 5146 *rx_en = 1; 5147 *tx_en = 0; 5148 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { 5149 *tx_en = 1; 5150 *rx_en = 0; 5151 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { 5152 *rx_en = 1; 5153 *tx_en = 1; 5154 } else { 5155 *rx_en = 0; 5156 *tx_en = 0; 5157 } 5158 } 5159 5160 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, 5161 u32 rx_en, u32 tx_en) 5162 { 5163 struct hclge_vport *vport = hclge_get_vport(handle); 5164 struct hclge_dev *hdev = vport->back; 5165 struct phy_device *phydev = hdev->hw.mac.phydev; 5166 u32 fc_autoneg; 5167 5168 fc_autoneg = hclge_get_autoneg(handle); 5169 if (auto_neg != fc_autoneg) { 5170 dev_info(&hdev->pdev->dev, 5171 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); 5172 return -EOPNOTSUPP; 5173 } 5174 5175 if (hdev->tm_info.fc_mode == 
HCLGE_FC_PFC) { 5176 dev_info(&hdev->pdev->dev, 5177 "Priority flow control enabled. Cannot set link flow control.\n"); 5178 return -EOPNOTSUPP; 5179 } 5180 5181 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); 5182 5183 if (!fc_autoneg) 5184 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); 5185 5186 /* Only support flow control negotiation for netdev with 5187 * phy attached for now. 5188 */ 5189 if (!phydev) 5190 return -EOPNOTSUPP; 5191 5192 return phy_start_aneg(phydev); 5193 } 5194 5195 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, 5196 u8 *auto_neg, u32 *speed, u8 *duplex) 5197 { 5198 struct hclge_vport *vport = hclge_get_vport(handle); 5199 struct hclge_dev *hdev = vport->back; 5200 5201 if (speed) 5202 *speed = hdev->hw.mac.speed; 5203 if (duplex) 5204 *duplex = hdev->hw.mac.duplex; 5205 if (auto_neg) 5206 *auto_neg = hdev->hw.mac.autoneg; 5207 } 5208 5209 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) 5210 { 5211 struct hclge_vport *vport = hclge_get_vport(handle); 5212 struct hclge_dev *hdev = vport->back; 5213 5214 if (media_type) 5215 *media_type = hdev->hw.mac.media_type; 5216 } 5217 5218 static void hclge_get_mdix_mode(struct hnae3_handle *handle, 5219 u8 *tp_mdix_ctrl, u8 *tp_mdix) 5220 { 5221 struct hclge_vport *vport = hclge_get_vport(handle); 5222 struct hclge_dev *hdev = vport->back; 5223 struct phy_device *phydev = hdev->hw.mac.phydev; 5224 int mdix_ctrl, mdix, retval, is_resolved; 5225 5226 if (!phydev) { 5227 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 5228 *tp_mdix = ETH_TP_MDI_INVALID; 5229 return; 5230 } 5231 5232 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); 5233 5234 retval = phy_read(phydev, HCLGE_PHY_CSC_REG); 5235 mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, 5236 HCLGE_PHY_MDIX_CTRL_S); 5237 5238 retval = phy_read(phydev, HCLGE_PHY_CSS_REG); 5239 mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); 5240 is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); 5241 5242 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); 5243 5244 switch (mdix_ctrl) { 5245 case 0x0: 5246 *tp_mdix_ctrl = ETH_TP_MDI; 5247 break; 5248 case 0x1: 5249 *tp_mdix_ctrl = ETH_TP_MDI_X; 5250 break; 5251 case 0x3: 5252 *tp_mdix_ctrl = ETH_TP_MDI_AUTO; 5253 break; 5254 default: 5255 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 5256 break; 5257 } 5258 5259 if (!is_resolved) 5260 *tp_mdix = ETH_TP_MDI_INVALID; 5261 else if (mdix) 5262 *tp_mdix = ETH_TP_MDI_X; 5263 else 5264 *tp_mdix = ETH_TP_MDI; 5265 } 5266 5267 static int hclge_init_client_instance(struct hnae3_client *client, 5268 struct hnae3_ae_dev *ae_dev) 5269 { 5270 struct hclge_dev *hdev = ae_dev->priv; 5271 struct hclge_vport *vport; 5272 int i, ret; 5273 5274 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 5275 vport = &hdev->vport[i]; 5276 5277 switch (client->type) { 5278 case HNAE3_CLIENT_KNIC: 5279 5280 hdev->nic_client = client; 5281 vport->nic.client = client; 5282 ret = client->ops->init_instance(&vport->nic); 5283 if (ret) 5284 return ret; 5285 5286 if (hdev->roce_client && 5287 hnae3_dev_roce_supported(hdev)) { 5288 struct hnae3_client *rc = hdev->roce_client; 5289 5290 ret = hclge_init_roce_base_info(vport); 5291 if (ret) 5292 return ret; 5293 5294 ret = rc->ops->init_instance(&vport->roce); 5295 if (ret) 5296 return ret; 5297 } 5298 5299 break; 5300 case HNAE3_CLIENT_UNIC: 5301 hdev->nic_client = client; 5302 vport->nic.client = client; 5303 5304 ret = client->ops->init_instance(&vport->nic); 5305 if (ret) 5306 return ret; 5307 5308 break; 
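	/* The RoCE case below only records the client on the vport; the
	 * RoCE instance itself is initialized once both a NIC client and
	 * RoCE hardware support are present.
	 */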
5309 case HNAE3_CLIENT_ROCE: 5310 if (hnae3_dev_roce_supported(hdev)) { 5311 hdev->roce_client = client; 5312 vport->roce.client = client; 5313 } 5314 5315 if (hdev->roce_client && hdev->nic_client) { 5316 ret = hclge_init_roce_base_info(vport); 5317 if (ret) 5318 return ret; 5319 5320 ret = client->ops->init_instance(&vport->roce); 5321 if (ret) 5322 return ret; 5323 } 5324 } 5325 } 5326 5327 return 0; 5328 } 5329 5330 static void hclge_uninit_client_instance(struct hnae3_client *client, 5331 struct hnae3_ae_dev *ae_dev) 5332 { 5333 struct hclge_dev *hdev = ae_dev->priv; 5334 struct hclge_vport *vport; 5335 int i; 5336 5337 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 5338 vport = &hdev->vport[i]; 5339 if (hdev->roce_client) { 5340 hdev->roce_client->ops->uninit_instance(&vport->roce, 5341 0); 5342 hdev->roce_client = NULL; 5343 vport->roce.client = NULL; 5344 } 5345 if (client->type == HNAE3_CLIENT_ROCE) 5346 return; 5347 if (client->ops->uninit_instance) { 5348 client->ops->uninit_instance(&vport->nic, 0); 5349 hdev->nic_client = NULL; 5350 vport->nic.client = NULL; 5351 } 5352 } 5353 } 5354 5355 static int hclge_pci_init(struct hclge_dev *hdev) 5356 { 5357 struct pci_dev *pdev = hdev->pdev; 5358 struct hclge_hw *hw; 5359 int ret; 5360 5361 ret = pci_enable_device(pdev); 5362 if (ret) { 5363 dev_err(&pdev->dev, "failed to enable PCI device\n"); 5364 return ret; 5365 } 5366 5367 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 5368 if (ret) { 5369 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 5370 if (ret) { 5371 dev_err(&pdev->dev, 5372 "can't set consistent PCI DMA"); 5373 goto err_disable_device; 5374 } 5375 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); 5376 } 5377 5378 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); 5379 if (ret) { 5380 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 5381 goto err_disable_device; 5382 } 5383 5384 pci_set_master(pdev); 5385 hw = &hdev->hw; 5386 hw->back = hdev; 5387 hw->io_base = pcim_iomap(pdev, 2, 0); 5388 if (!hw->io_base) { 5389 dev_err(&pdev->dev, "Can't map configuration register space\n"); 5390 ret = -ENOMEM; 5391 goto err_clr_master; 5392 } 5393 5394 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); 5395 5396 return 0; 5397 err_clr_master: 5398 pci_clear_master(pdev); 5399 pci_release_regions(pdev); 5400 err_disable_device: 5401 pci_disable_device(pdev); 5402 5403 return ret; 5404 } 5405 5406 static void hclge_pci_uninit(struct hclge_dev *hdev) 5407 { 5408 struct pci_dev *pdev = hdev->pdev; 5409 5410 pcim_iounmap(pdev, hdev->hw.io_base); 5411 pci_free_irq_vectors(pdev); 5412 pci_clear_master(pdev); 5413 pci_release_mem_regions(pdev); 5414 pci_disable_device(pdev); 5415 } 5416 5417 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) 5418 { 5419 struct pci_dev *pdev = ae_dev->pdev; 5420 struct hclge_dev *hdev; 5421 int ret; 5422 5423 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 5424 if (!hdev) { 5425 ret = -ENOMEM; 5426 goto out; 5427 } 5428 5429 hdev->pdev = pdev; 5430 hdev->ae_dev = ae_dev; 5431 hdev->reset_type = HNAE3_NONE_RESET; 5432 hdev->reset_request = 0; 5433 hdev->reset_pending = 0; 5434 ae_dev->priv = hdev; 5435 5436 ret = hclge_pci_init(hdev); 5437 if (ret) { 5438 dev_err(&pdev->dev, "PCI init failed\n"); 5439 goto out; 5440 } 5441 5442 /* Firmware command queue initialize */ 5443 ret = hclge_cmd_queue_init(hdev); 5444 if (ret) { 5445 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); 5446 goto err_pci_uninit; 5447 } 5448 5449 /* Firmware 
command initialize */ 5450 ret = hclge_cmd_init(hdev); 5451 if (ret) 5452 goto err_cmd_uninit; 5453 5454 ret = hclge_get_cap(hdev); 5455 if (ret) { 5456 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 5457 ret); 5458 goto err_cmd_uninit; 5459 } 5460 5461 ret = hclge_configure(hdev); 5462 if (ret) { 5463 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 5464 goto err_cmd_uninit; 5465 } 5466 5467 ret = hclge_init_msi(hdev); 5468 if (ret) { 5469 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); 5470 goto err_cmd_uninit; 5471 } 5472 5473 ret = hclge_misc_irq_init(hdev); 5474 if (ret) { 5475 dev_err(&pdev->dev, 5476 "Misc IRQ(vector0) init error, ret = %d.\n", 5477 ret); 5478 goto err_msi_uninit; 5479 } 5480 5481 ret = hclge_alloc_tqps(hdev); 5482 if (ret) { 5483 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); 5484 goto err_msi_irq_uninit; 5485 } 5486 5487 ret = hclge_alloc_vport(hdev); 5488 if (ret) { 5489 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); 5490 goto err_msi_irq_uninit; 5491 } 5492 5493 ret = hclge_map_tqp(hdev); 5494 if (ret) { 5495 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 5496 goto err_msi_irq_uninit; 5497 } 5498 5499 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { 5500 ret = hclge_mac_mdio_config(hdev); 5501 if (ret) { 5502 dev_err(&hdev->pdev->dev, 5503 "mdio config fail ret=%d\n", ret); 5504 goto err_msi_irq_uninit; 5505 } 5506 } 5507 5508 ret = hclge_mac_init(hdev); 5509 if (ret) { 5510 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 5511 goto err_mdiobus_unreg; 5512 } 5513 5514 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 5515 if (ret) { 5516 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 5517 goto err_mdiobus_unreg; 5518 } 5519 5520 ret = hclge_init_vlan_config(hdev); 5521 if (ret) { 5522 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 5523 goto err_mdiobus_unreg; 5524 } 5525 5526 ret = hclge_tm_schd_init(hdev); 5527 if (ret) { 5528 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); 5529 goto err_mdiobus_unreg; 5530 } 5531 5532 hclge_rss_init_cfg(hdev); 5533 ret = hclge_rss_init_hw(hdev); 5534 if (ret) { 5535 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 5536 goto err_mdiobus_unreg; 5537 } 5538 5539 ret = init_mgr_tbl(hdev); 5540 if (ret) { 5541 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); 5542 goto err_mdiobus_unreg; 5543 } 5544 5545 hclge_dcb_ops_set(hdev); 5546 5547 timer_setup(&hdev->service_timer, hclge_service_timer, 0); 5548 INIT_WORK(&hdev->service_task, hclge_service_task); 5549 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); 5550 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); 5551 5552 /* Enable MISC vector(vector0) */ 5553 hclge_enable_vector(&hdev->misc_vector, true); 5554 5555 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); 5556 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5557 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 5558 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 5559 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 5560 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 5561 5562 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); 5563 return 0; 5564 5565 err_mdiobus_unreg: 5566 if (hdev->hw.mac.phydev) 5567 mdiobus_unregister(hdev->hw.mac.mdio_bus); 5568 err_msi_irq_uninit: 5569 hclge_misc_irq_uninit(hdev); 5570 err_msi_uninit: 5571 pci_free_irq_vectors(pdev); 5572 err_cmd_uninit: 5573 
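	/* Falling through from the error labels above releases the command
	 * queue here and then the PCI resources below.
	 */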
hclge_destroy_cmd_queue(&hdev->hw); 5574 err_pci_uninit: 5575 pcim_iounmap(pdev, hdev->hw.io_base); 5576 pci_clear_master(pdev); 5577 pci_release_regions(pdev); 5578 pci_disable_device(pdev); 5579 out: 5580 return ret; 5581 } 5582 5583 static void hclge_stats_clear(struct hclge_dev *hdev) 5584 { 5585 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); 5586 } 5587 5588 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) 5589 { 5590 struct hclge_dev *hdev = ae_dev->priv; 5591 struct pci_dev *pdev = ae_dev->pdev; 5592 int ret; 5593 5594 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5595 5596 hclge_stats_clear(hdev); 5597 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); 5598 5599 ret = hclge_cmd_init(hdev); 5600 if (ret) { 5601 dev_err(&pdev->dev, "Cmd queue init failed\n"); 5602 return ret; 5603 } 5604 5605 ret = hclge_get_cap(hdev); 5606 if (ret) { 5607 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 5608 ret); 5609 return ret; 5610 } 5611 5612 ret = hclge_configure(hdev); 5613 if (ret) { 5614 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 5615 return ret; 5616 } 5617 5618 ret = hclge_map_tqp(hdev); 5619 if (ret) { 5620 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 5621 return ret; 5622 } 5623 5624 ret = hclge_mac_init(hdev); 5625 if (ret) { 5626 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 5627 return ret; 5628 } 5629 5630 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 5631 if (ret) { 5632 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 5633 return ret; 5634 } 5635 5636 ret = hclge_init_vlan_config(hdev); 5637 if (ret) { 5638 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 5639 return ret; 5640 } 5641 5642 ret = hclge_tm_init_hw(hdev); 5643 if (ret) { 5644 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); 5645 return ret; 5646 } 5647 5648 ret = hclge_rss_init_hw(hdev); 5649 if (ret) { 5650 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 5651 return ret; 5652 } 5653 5654 /* Enable MISC vector(vector0) */ 5655 hclge_enable_vector(&hdev->misc_vector, true); 5656 5657 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", 5658 HCLGE_DRIVER_NAME); 5659 5660 return 0; 5661 } 5662 5663 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 5664 { 5665 struct hclge_dev *hdev = ae_dev->priv; 5666 struct hclge_mac *mac = &hdev->hw.mac; 5667 5668 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5669 5670 if (hdev->service_timer.function) 5671 del_timer_sync(&hdev->service_timer); 5672 if (hdev->service_task.func) 5673 cancel_work_sync(&hdev->service_task); 5674 if (hdev->rst_service_task.func) 5675 cancel_work_sync(&hdev->rst_service_task); 5676 if (hdev->mbx_service_task.func) 5677 cancel_work_sync(&hdev->mbx_service_task); 5678 5679 if (mac->phydev) 5680 mdiobus_unregister(mac->mdio_bus); 5681 5682 /* Disable MISC vector(vector0) */ 5683 hclge_enable_vector(&hdev->misc_vector, false); 5684 hclge_destroy_cmd_queue(&hdev->hw); 5685 hclge_misc_irq_uninit(hdev); 5686 hclge_pci_uninit(hdev); 5687 ae_dev->priv = NULL; 5688 } 5689 5690 static u32 hclge_get_max_channels(struct hnae3_handle *handle) 5691 { 5692 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 5693 struct hclge_vport *vport = hclge_get_vport(handle); 5694 struct hclge_dev *hdev = vport->back; 5695 5696 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); 5697 } 5698 5699 static void hclge_get_channels(struct hnae3_handle *handle, 5700 struct ethtool_channels *ch) 5701 { 5702 struct hclge_vport *vport 
= hclge_get_vport(handle); 5703 5704 ch->max_combined = hclge_get_max_channels(handle); 5705 ch->other_count = 1; 5706 ch->max_other = 1; 5707 ch->combined_count = vport->alloc_tqps; 5708 } 5709 5710 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, 5711 u16 *free_tqps, u16 *max_rss_size) 5712 { 5713 struct hclge_vport *vport = hclge_get_vport(handle); 5714 struct hclge_dev *hdev = vport->back; 5715 u16 temp_tqps = 0; 5716 int i; 5717 5718 for (i = 0; i < hdev->num_tqps; i++) { 5719 if (!hdev->htqp[i].alloced) 5720 temp_tqps++; 5721 } 5722 *free_tqps = temp_tqps; 5723 *max_rss_size = hdev->rss_size_max; 5724 } 5725 5726 static void hclge_release_tqp(struct hclge_vport *vport) 5727 { 5728 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 5729 struct hclge_dev *hdev = vport->back; 5730 int i; 5731 5732 for (i = 0; i < kinfo->num_tqps; i++) { 5733 struct hclge_tqp *tqp = 5734 container_of(kinfo->tqp[i], struct hclge_tqp, q); 5735 5736 tqp->q.handle = NULL; 5737 tqp->q.tqp_index = 0; 5738 tqp->alloced = false; 5739 } 5740 5741 devm_kfree(&hdev->pdev->dev, kinfo->tqp); 5742 kinfo->tqp = NULL; 5743 } 5744 5745 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) 5746 { 5747 struct hclge_vport *vport = hclge_get_vport(handle); 5748 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 5749 struct hclge_dev *hdev = vport->back; 5750 int cur_rss_size = kinfo->rss_size; 5751 int cur_tqps = kinfo->num_tqps; 5752 u16 tc_offset[HCLGE_MAX_TC_NUM]; 5753 u16 tc_valid[HCLGE_MAX_TC_NUM]; 5754 u16 tc_size[HCLGE_MAX_TC_NUM]; 5755 u16 roundup_size; 5756 u32 *rss_indir; 5757 int ret, i; 5758 5759 hclge_release_tqp(vport); 5760 5761 ret = hclge_knic_setup(vport, new_tqps_num); 5762 if (ret) { 5763 dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); 5764 return ret; 5765 } 5766 5767 ret = hclge_map_tqp_to_vport(hdev, vport); 5768 if (ret) { 5769 dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); 5770 return ret; 5771 } 5772 5773 ret = hclge_tm_schd_init(hdev); 5774 if (ret) { 5775 dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); 5776 return ret; 5777 } 5778 5779 roundup_size = roundup_pow_of_two(kinfo->rss_size); 5780 roundup_size = ilog2(roundup_size); 5781 /* Set the RSS TC mode according to the new RSS size */ 5782 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 5783 tc_valid[i] = 0; 5784 5785 if (!(hdev->hw_tc_map & BIT(i))) 5786 continue; 5787 5788 tc_valid[i] = 1; 5789 tc_size[i] = roundup_size; 5790 tc_offset[i] = kinfo->rss_size * i; 5791 } 5792 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 5793 if (ret) 5794 return ret; 5795 5796 /* Reinitializes the rss indirect table according to the new RSS size */ 5797 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); 5798 if (!rss_indir) 5799 return -ENOMEM; 5800 5801 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 5802 rss_indir[i] = i % kinfo->rss_size; 5803 5804 ret = hclge_set_rss(handle, rss_indir, NULL, 0); 5805 if (ret) 5806 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 5807 ret); 5808 5809 kfree(rss_indir); 5810 5811 if (!ret) 5812 dev_info(&hdev->pdev->dev, 5813 "Channels changed, rss_size from %d to %d, tqps from %d to %d", 5814 cur_rss_size, kinfo->rss_size, 5815 cur_tqps, kinfo->rss_size * kinfo->num_tc); 5816 5817 return ret; 5818 } 5819 5820 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, 5821 u32 *regs_num_64_bit) 5822 { 5823 struct hclge_desc desc; 5824 u32 total_num; 5825 int ret; 
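	/* The firmware reports how many 32 bit and 64 bit registers make up
	 * the register dump; both counts come back in a single descriptor.
	 */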
5826 5827 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true); 5828 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5829 if (ret) { 5830 dev_err(&hdev->pdev->dev, 5831 "Query register number cmd failed, ret = %d.\n", ret); 5832 return ret; 5833 } 5834 5835 *regs_num_32_bit = le32_to_cpu(desc.data[0]); 5836 *regs_num_64_bit = le32_to_cpu(desc.data[1]); 5837 5838 total_num = *regs_num_32_bit + *regs_num_64_bit; 5839 if (!total_num) 5840 return -EINVAL; 5841 5842 return 0; 5843 } 5844 5845 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, 5846 void *data) 5847 { 5848 #define HCLGE_32_BIT_REG_RTN_DATANUM 8 5849 5850 struct hclge_desc *desc; 5851 u32 *reg_val = data; 5852 __le32 *desc_data; 5853 int cmd_num; 5854 int i, k, n; 5855 int ret; 5856 5857 if (regs_num == 0) 5858 return 0; 5859 5860 cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM); 5861 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); 5862 if (!desc) 5863 return -ENOMEM; 5864 5865 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true); 5866 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); 5867 if (ret) { 5868 dev_err(&hdev->pdev->dev, 5869 "Query 32 bit register cmd failed, ret = %d.\n", ret); 5870 kfree(desc); 5871 return ret; 5872 } 5873 5874 for (i = 0; i < cmd_num; i++) { 5875 if (i == 0) { 5876 desc_data = (__le32 *)(&desc[i].data[0]); 5877 n = HCLGE_32_BIT_REG_RTN_DATANUM - 2; 5878 } else { 5879 desc_data = (__le32 *)(&desc[i]); 5880 n = HCLGE_32_BIT_REG_RTN_DATANUM; 5881 } 5882 for (k = 0; k < n; k++) { 5883 *reg_val++ = le32_to_cpu(*desc_data++); 5884 5885 regs_num--; 5886 if (!regs_num) 5887 break; 5888 } 5889 } 5890 5891 kfree(desc); 5892 return 0; 5893 } 5894 5895 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, 5896 void *data) 5897 { 5898 #define HCLGE_64_BIT_REG_RTN_DATANUM 4 5899 5900 struct hclge_desc *desc; 5901 u64 *reg_val = data; 5902 __le64 *desc_data; 5903 int cmd_num; 5904 int i, k, n; 5905 int ret; 5906 5907 if (regs_num == 0) 5908 return 0; 5909 5910 cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM); 5911 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); 5912 if (!desc) 5913 return -ENOMEM; 5914 5915 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true); 5916 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); 5917 if (ret) { 5918 dev_err(&hdev->pdev->dev, 5919 "Query 64 bit register cmd failed, ret = %d.\n", ret); 5920 kfree(desc); 5921 return ret; 5922 } 5923 5924 for (i = 0; i < cmd_num; i++) { 5925 if (i == 0) { 5926 desc_data = (__le64 *)(&desc[i].data[0]); 5927 n = HCLGE_64_BIT_REG_RTN_DATANUM - 1; 5928 } else { 5929 desc_data = (__le64 *)(&desc[i]); 5930 n = HCLGE_64_BIT_REG_RTN_DATANUM; 5931 } 5932 for (k = 0; k < n; k++) { 5933 *reg_val++ = le64_to_cpu(*desc_data++); 5934 5935 regs_num--; 5936 if (!regs_num) 5937 break; 5938 } 5939 } 5940 5941 kfree(desc); 5942 return 0; 5943 } 5944 5945 static int hclge_get_regs_len(struct hnae3_handle *handle) 5946 { 5947 struct hclge_vport *vport = hclge_get_vport(handle); 5948 struct hclge_dev *hdev = vport->back; 5949 u32 regs_num_32_bit, regs_num_64_bit; 5950 int ret; 5951 5952 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); 5953 if (ret) { 5954 dev_err(&hdev->pdev->dev, 5955 "Get register number failed, ret = %d.\n", ret); 5956 return -EOPNOTSUPP; 5957 } 5958 5959 return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64); 5960 } 5961 5962 static void hclge_get_regs(struct hnae3_handle *handle, 
			   u32 *version, void *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	/* the 64 bit registers are dumped right after the 32 bit ones */
	data = (u32 *)data + regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, data);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
		       HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static void hclge_get_port_type(struct hnae3_handle *handle,
				u8 *port_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 media_type = hdev->hw.mac.media_type;

	switch (media_type) {
	case HNAE3_MEDIA_TYPE_FIBER:
		*port_type = PORT_FIBRE;
		break;
	case HNAE3_MEDIA_TYPE_COPPER:
		*port_type = PORT_TP;
		break;
	case HNAE3_MEDIA_TYPE_UNKNOWN:
	default:
		*port_type = PORT_OTHER;
		break;
	}
}

static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_flowctrl_adv = hclge_get_flowctrl_adv,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.get_port_type = hclge_get_port_type,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.name = HCLGE_NAME,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}

module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);