1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/dma-mapping.h> 5 #include <linux/etherdevice.h> 6 #include <linux/interrupt.h> 7 #ifdef CONFIG_RFS_ACCEL 8 #include <linux/cpu_rmap.h> 9 #endif 10 #include <linux/if_vlan.h> 11 #include <linux/irq.h> 12 #include <linux/ip.h> 13 #include <linux/ipv6.h> 14 #include <linux/module.h> 15 #include <linux/pci.h> 16 #include <linux/aer.h> 17 #include <linux/skbuff.h> 18 #include <linux/sctp.h> 19 #include <net/gre.h> 20 #include <net/gro.h> 21 #include <net/ip6_checksum.h> 22 #include <net/pkt_cls.h> 23 #include <net/tcp.h> 24 #include <net/vxlan.h> 25 #include <net/geneve.h> 26 27 #include "hnae3.h" 28 #include "hns3_enet.h" 29 /* All hns3 tracepoints are defined by the include below, which 30 * must be included exactly once across the whole kernel with 31 * CREATE_TRACE_POINTS defined 32 */ 33 #define CREATE_TRACE_POINTS 34 #include "hns3_trace.h" 35 36 #define hns3_set_field(origin, shift, val) ((origin) |= (val) << (shift)) 37 #define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) 38 39 #define hns3_rl_err(fmt, ...) \ 40 do { \ 41 if (net_ratelimit()) \ 42 netdev_err(fmt, ##__VA_ARGS__); \ 43 } while (0) 44 45 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force); 46 47 static const char hns3_driver_name[] = "hns3"; 48 static const char hns3_driver_string[] = 49 "Hisilicon Ethernet Network Driver for Hip08 Family"; 50 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; 51 static struct hnae3_client client; 52 53 static int debug = -1; 54 module_param(debug, int, 0); 55 MODULE_PARM_DESC(debug, " Network interface message level setting"); 56 57 static unsigned int tx_sgl = 1; 58 module_param(tx_sgl, uint, 0600); 59 MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping"); 60 61 static bool page_pool_enabled = true; 62 module_param(page_pool_enabled, bool, 0400); 63 64 #define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \ 65 sizeof(struct sg_table)) 66 #define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \ 67 dma_get_cache_alignment()) 68 69 #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ 70 NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) 71 72 #define HNS3_INNER_VLAN_TAG 1 73 #define HNS3_OUTER_VLAN_TAG 2 74 75 #define HNS3_MIN_TX_LEN 33U 76 #define HNS3_MIN_TUN_PKT_LEN 65U 77 78 /* hns3_pci_tbl - PCI Device ID Table 79 * 80 * Last entry must be all 0s 81 * 82 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 83 * Class, Class Mask, private data (not used) } 84 */ 85 static const struct pci_device_id hns3_pci_tbl[] = { 86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 87 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 88 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 89 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 90 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 91 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 92 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 93 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 94 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 95 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 96 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 97 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 98 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 99 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 100 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, 101 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), 102 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 103 /* required last entry */ 104 {0,} 105 }; 106 MODULE_DEVICE_TABLE(pci, 
hns3_pci_tbl); 107 108 #define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t) \ 109 { ptype, \ 110 l, \ 111 CHECKSUM_##s, \ 112 HNS3_L3_TYPE_##t, \ 113 1 } 114 115 #define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \ 116 { ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0 } 117 118 static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = { 119 HNS3_RX_PTYPE_UNUSED_ENTRY(0), 120 HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP), 121 HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP), 122 HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP), 123 HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL), 124 HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL), 125 HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL), 126 HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM), 127 HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL), 128 HNS3_RX_PTYPE_UNUSED_ENTRY(9), 129 HNS3_RX_PTYPE_UNUSED_ENTRY(10), 130 HNS3_RX_PTYPE_UNUSED_ENTRY(11), 131 HNS3_RX_PTYPE_UNUSED_ENTRY(12), 132 HNS3_RX_PTYPE_UNUSED_ENTRY(13), 133 HNS3_RX_PTYPE_UNUSED_ENTRY(14), 134 HNS3_RX_PTYPE_UNUSED_ENTRY(15), 135 HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL), 136 HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4), 137 HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4), 138 HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4), 139 HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4), 140 HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4), 141 HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4), 142 HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4), 143 HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4), 144 HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4), 145 HNS3_RX_PTYPE_UNUSED_ENTRY(26), 146 HNS3_RX_PTYPE_UNUSED_ENTRY(27), 147 HNS3_RX_PTYPE_UNUSED_ENTRY(28), 148 HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL), 149 HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL), 150 HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4), 151 HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4), 152 HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4), 153 HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4), 154 HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4), 155 HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4), 156 HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4), 157 HNS3_RX_PTYPE_UNUSED_ENTRY(38), 158 HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6), 159 HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6), 160 HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6), 161 HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6), 162 HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6), 163 HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6), 164 HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6), 165 HNS3_RX_PTYPE_UNUSED_ENTRY(46), 166 HNS3_RX_PTYPE_UNUSED_ENTRY(47), 167 HNS3_RX_PTYPE_UNUSED_ENTRY(48), 168 HNS3_RX_PTYPE_UNUSED_ENTRY(49), 169 HNS3_RX_PTYPE_UNUSED_ENTRY(50), 170 HNS3_RX_PTYPE_UNUSED_ENTRY(51), 171 HNS3_RX_PTYPE_UNUSED_ENTRY(52), 172 HNS3_RX_PTYPE_UNUSED_ENTRY(53), 173 HNS3_RX_PTYPE_UNUSED_ENTRY(54), 174 HNS3_RX_PTYPE_UNUSED_ENTRY(55), 175 HNS3_RX_PTYPE_UNUSED_ENTRY(56), 176 HNS3_RX_PTYPE_UNUSED_ENTRY(57), 177 HNS3_RX_PTYPE_UNUSED_ENTRY(58), 178 HNS3_RX_PTYPE_UNUSED_ENTRY(59), 179 HNS3_RX_PTYPE_UNUSED_ENTRY(60), 180 HNS3_RX_PTYPE_UNUSED_ENTRY(61), 181 HNS3_RX_PTYPE_UNUSED_ENTRY(62), 182 HNS3_RX_PTYPE_UNUSED_ENTRY(63), 183 HNS3_RX_PTYPE_UNUSED_ENTRY(64), 184 HNS3_RX_PTYPE_UNUSED_ENTRY(65), 185 HNS3_RX_PTYPE_UNUSED_ENTRY(66), 186 HNS3_RX_PTYPE_UNUSED_ENTRY(67), 187 HNS3_RX_PTYPE_UNUSED_ENTRY(68), 188 HNS3_RX_PTYPE_UNUSED_ENTRY(69), 189 HNS3_RX_PTYPE_UNUSED_ENTRY(70), 190 HNS3_RX_PTYPE_UNUSED_ENTRY(71), 191 HNS3_RX_PTYPE_UNUSED_ENTRY(72), 192 HNS3_RX_PTYPE_UNUSED_ENTRY(73), 193 HNS3_RX_PTYPE_UNUSED_ENTRY(74), 194 HNS3_RX_PTYPE_UNUSED_ENTRY(75), 195 HNS3_RX_PTYPE_UNUSED_ENTRY(76), 196 
HNS3_RX_PTYPE_UNUSED_ENTRY(77), 197 HNS3_RX_PTYPE_UNUSED_ENTRY(78), 198 HNS3_RX_PTYPE_UNUSED_ENTRY(79), 199 HNS3_RX_PTYPE_UNUSED_ENTRY(80), 200 HNS3_RX_PTYPE_UNUSED_ENTRY(81), 201 HNS3_RX_PTYPE_UNUSED_ENTRY(82), 202 HNS3_RX_PTYPE_UNUSED_ENTRY(83), 203 HNS3_RX_PTYPE_UNUSED_ENTRY(84), 204 HNS3_RX_PTYPE_UNUSED_ENTRY(85), 205 HNS3_RX_PTYPE_UNUSED_ENTRY(86), 206 HNS3_RX_PTYPE_UNUSED_ENTRY(87), 207 HNS3_RX_PTYPE_UNUSED_ENTRY(88), 208 HNS3_RX_PTYPE_UNUSED_ENTRY(89), 209 HNS3_RX_PTYPE_UNUSED_ENTRY(90), 210 HNS3_RX_PTYPE_UNUSED_ENTRY(91), 211 HNS3_RX_PTYPE_UNUSED_ENTRY(92), 212 HNS3_RX_PTYPE_UNUSED_ENTRY(93), 213 HNS3_RX_PTYPE_UNUSED_ENTRY(94), 214 HNS3_RX_PTYPE_UNUSED_ENTRY(95), 215 HNS3_RX_PTYPE_UNUSED_ENTRY(96), 216 HNS3_RX_PTYPE_UNUSED_ENTRY(97), 217 HNS3_RX_PTYPE_UNUSED_ENTRY(98), 218 HNS3_RX_PTYPE_UNUSED_ENTRY(99), 219 HNS3_RX_PTYPE_UNUSED_ENTRY(100), 220 HNS3_RX_PTYPE_UNUSED_ENTRY(101), 221 HNS3_RX_PTYPE_UNUSED_ENTRY(102), 222 HNS3_RX_PTYPE_UNUSED_ENTRY(103), 223 HNS3_RX_PTYPE_UNUSED_ENTRY(104), 224 HNS3_RX_PTYPE_UNUSED_ENTRY(105), 225 HNS3_RX_PTYPE_UNUSED_ENTRY(106), 226 HNS3_RX_PTYPE_UNUSED_ENTRY(107), 227 HNS3_RX_PTYPE_UNUSED_ENTRY(108), 228 HNS3_RX_PTYPE_UNUSED_ENTRY(109), 229 HNS3_RX_PTYPE_UNUSED_ENTRY(110), 230 HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6), 231 HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6), 232 HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6), 233 HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6), 234 HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6), 235 HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6), 236 HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6), 237 HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6), 238 HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6), 239 HNS3_RX_PTYPE_UNUSED_ENTRY(120), 240 HNS3_RX_PTYPE_UNUSED_ENTRY(121), 241 HNS3_RX_PTYPE_UNUSED_ENTRY(122), 242 HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL), 243 HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL), 244 HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4), 245 HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4), 246 HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4), 247 HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4), 248 HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4), 249 HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4), 250 HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4), 251 HNS3_RX_PTYPE_UNUSED_ENTRY(132), 252 HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6), 253 HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6), 254 HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6), 255 HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6), 256 HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6), 257 HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6), 258 HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6), 259 HNS3_RX_PTYPE_UNUSED_ENTRY(140), 260 HNS3_RX_PTYPE_UNUSED_ENTRY(141), 261 HNS3_RX_PTYPE_UNUSED_ENTRY(142), 262 HNS3_RX_PTYPE_UNUSED_ENTRY(143), 263 HNS3_RX_PTYPE_UNUSED_ENTRY(144), 264 HNS3_RX_PTYPE_UNUSED_ENTRY(145), 265 HNS3_RX_PTYPE_UNUSED_ENTRY(146), 266 HNS3_RX_PTYPE_UNUSED_ENTRY(147), 267 HNS3_RX_PTYPE_UNUSED_ENTRY(148), 268 HNS3_RX_PTYPE_UNUSED_ENTRY(149), 269 HNS3_RX_PTYPE_UNUSED_ENTRY(150), 270 HNS3_RX_PTYPE_UNUSED_ENTRY(151), 271 HNS3_RX_PTYPE_UNUSED_ENTRY(152), 272 HNS3_RX_PTYPE_UNUSED_ENTRY(153), 273 HNS3_RX_PTYPE_UNUSED_ENTRY(154), 274 HNS3_RX_PTYPE_UNUSED_ENTRY(155), 275 HNS3_RX_PTYPE_UNUSED_ENTRY(156), 276 HNS3_RX_PTYPE_UNUSED_ENTRY(157), 277 HNS3_RX_PTYPE_UNUSED_ENTRY(158), 278 HNS3_RX_PTYPE_UNUSED_ENTRY(159), 279 HNS3_RX_PTYPE_UNUSED_ENTRY(160), 280 HNS3_RX_PTYPE_UNUSED_ENTRY(161), 281 HNS3_RX_PTYPE_UNUSED_ENTRY(162), 282 HNS3_RX_PTYPE_UNUSED_ENTRY(163), 283 
HNS3_RX_PTYPE_UNUSED_ENTRY(164), 284 HNS3_RX_PTYPE_UNUSED_ENTRY(165), 285 HNS3_RX_PTYPE_UNUSED_ENTRY(166), 286 HNS3_RX_PTYPE_UNUSED_ENTRY(167), 287 HNS3_RX_PTYPE_UNUSED_ENTRY(168), 288 HNS3_RX_PTYPE_UNUSED_ENTRY(169), 289 HNS3_RX_PTYPE_UNUSED_ENTRY(170), 290 HNS3_RX_PTYPE_UNUSED_ENTRY(171), 291 HNS3_RX_PTYPE_UNUSED_ENTRY(172), 292 HNS3_RX_PTYPE_UNUSED_ENTRY(173), 293 HNS3_RX_PTYPE_UNUSED_ENTRY(174), 294 HNS3_RX_PTYPE_UNUSED_ENTRY(175), 295 HNS3_RX_PTYPE_UNUSED_ENTRY(176), 296 HNS3_RX_PTYPE_UNUSED_ENTRY(177), 297 HNS3_RX_PTYPE_UNUSED_ENTRY(178), 298 HNS3_RX_PTYPE_UNUSED_ENTRY(179), 299 HNS3_RX_PTYPE_UNUSED_ENTRY(180), 300 HNS3_RX_PTYPE_UNUSED_ENTRY(181), 301 HNS3_RX_PTYPE_UNUSED_ENTRY(182), 302 HNS3_RX_PTYPE_UNUSED_ENTRY(183), 303 HNS3_RX_PTYPE_UNUSED_ENTRY(184), 304 HNS3_RX_PTYPE_UNUSED_ENTRY(185), 305 HNS3_RX_PTYPE_UNUSED_ENTRY(186), 306 HNS3_RX_PTYPE_UNUSED_ENTRY(187), 307 HNS3_RX_PTYPE_UNUSED_ENTRY(188), 308 HNS3_RX_PTYPE_UNUSED_ENTRY(189), 309 HNS3_RX_PTYPE_UNUSED_ENTRY(190), 310 HNS3_RX_PTYPE_UNUSED_ENTRY(191), 311 HNS3_RX_PTYPE_UNUSED_ENTRY(192), 312 HNS3_RX_PTYPE_UNUSED_ENTRY(193), 313 HNS3_RX_PTYPE_UNUSED_ENTRY(194), 314 HNS3_RX_PTYPE_UNUSED_ENTRY(195), 315 HNS3_RX_PTYPE_UNUSED_ENTRY(196), 316 HNS3_RX_PTYPE_UNUSED_ENTRY(197), 317 HNS3_RX_PTYPE_UNUSED_ENTRY(198), 318 HNS3_RX_PTYPE_UNUSED_ENTRY(199), 319 HNS3_RX_PTYPE_UNUSED_ENTRY(200), 320 HNS3_RX_PTYPE_UNUSED_ENTRY(201), 321 HNS3_RX_PTYPE_UNUSED_ENTRY(202), 322 HNS3_RX_PTYPE_UNUSED_ENTRY(203), 323 HNS3_RX_PTYPE_UNUSED_ENTRY(204), 324 HNS3_RX_PTYPE_UNUSED_ENTRY(205), 325 HNS3_RX_PTYPE_UNUSED_ENTRY(206), 326 HNS3_RX_PTYPE_UNUSED_ENTRY(207), 327 HNS3_RX_PTYPE_UNUSED_ENTRY(208), 328 HNS3_RX_PTYPE_UNUSED_ENTRY(209), 329 HNS3_RX_PTYPE_UNUSED_ENTRY(210), 330 HNS3_RX_PTYPE_UNUSED_ENTRY(211), 331 HNS3_RX_PTYPE_UNUSED_ENTRY(212), 332 HNS3_RX_PTYPE_UNUSED_ENTRY(213), 333 HNS3_RX_PTYPE_UNUSED_ENTRY(214), 334 HNS3_RX_PTYPE_UNUSED_ENTRY(215), 335 HNS3_RX_PTYPE_UNUSED_ENTRY(216), 336 HNS3_RX_PTYPE_UNUSED_ENTRY(217), 337 HNS3_RX_PTYPE_UNUSED_ENTRY(218), 338 HNS3_RX_PTYPE_UNUSED_ENTRY(219), 339 HNS3_RX_PTYPE_UNUSED_ENTRY(220), 340 HNS3_RX_PTYPE_UNUSED_ENTRY(221), 341 HNS3_RX_PTYPE_UNUSED_ENTRY(222), 342 HNS3_RX_PTYPE_UNUSED_ENTRY(223), 343 HNS3_RX_PTYPE_UNUSED_ENTRY(224), 344 HNS3_RX_PTYPE_UNUSED_ENTRY(225), 345 HNS3_RX_PTYPE_UNUSED_ENTRY(226), 346 HNS3_RX_PTYPE_UNUSED_ENTRY(227), 347 HNS3_RX_PTYPE_UNUSED_ENTRY(228), 348 HNS3_RX_PTYPE_UNUSED_ENTRY(229), 349 HNS3_RX_PTYPE_UNUSED_ENTRY(230), 350 HNS3_RX_PTYPE_UNUSED_ENTRY(231), 351 HNS3_RX_PTYPE_UNUSED_ENTRY(232), 352 HNS3_RX_PTYPE_UNUSED_ENTRY(233), 353 HNS3_RX_PTYPE_UNUSED_ENTRY(234), 354 HNS3_RX_PTYPE_UNUSED_ENTRY(235), 355 HNS3_RX_PTYPE_UNUSED_ENTRY(236), 356 HNS3_RX_PTYPE_UNUSED_ENTRY(237), 357 HNS3_RX_PTYPE_UNUSED_ENTRY(238), 358 HNS3_RX_PTYPE_UNUSED_ENTRY(239), 359 HNS3_RX_PTYPE_UNUSED_ENTRY(240), 360 HNS3_RX_PTYPE_UNUSED_ENTRY(241), 361 HNS3_RX_PTYPE_UNUSED_ENTRY(242), 362 HNS3_RX_PTYPE_UNUSED_ENTRY(243), 363 HNS3_RX_PTYPE_UNUSED_ENTRY(244), 364 HNS3_RX_PTYPE_UNUSED_ENTRY(245), 365 HNS3_RX_PTYPE_UNUSED_ENTRY(246), 366 HNS3_RX_PTYPE_UNUSED_ENTRY(247), 367 HNS3_RX_PTYPE_UNUSED_ENTRY(248), 368 HNS3_RX_PTYPE_UNUSED_ENTRY(249), 369 HNS3_RX_PTYPE_UNUSED_ENTRY(250), 370 HNS3_RX_PTYPE_UNUSED_ENTRY(251), 371 HNS3_RX_PTYPE_UNUSED_ENTRY(252), 372 HNS3_RX_PTYPE_UNUSED_ENTRY(253), 373 HNS3_RX_PTYPE_UNUSED_ENTRY(254), 374 HNS3_RX_PTYPE_UNUSED_ENTRY(255), 375 }; 376 377 #define HNS3_INVALID_PTYPE \ 378 ARRAY_SIZE(hns3_rx_ptype_tbl) 379 380 static irqreturn_t hns3_irq_handle(int irq, void *vector) 381 { 382 
struct hns3_enet_tqp_vector *tqp_vector = vector; 383 384 napi_schedule_irqoff(&tqp_vector->napi); 385 tqp_vector->event_cnt++; 386 387 return IRQ_HANDLED; 388 } 389 390 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) 391 { 392 struct hns3_enet_tqp_vector *tqp_vectors; 393 unsigned int i; 394 395 for (i = 0; i < priv->vector_num; i++) { 396 tqp_vectors = &priv->tqp_vector[i]; 397 398 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) 399 continue; 400 401 /* clear the affinity mask */ 402 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL); 403 404 /* release the irq resource */ 405 free_irq(tqp_vectors->vector_irq, tqp_vectors); 406 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; 407 } 408 } 409 410 static int hns3_nic_init_irq(struct hns3_nic_priv *priv) 411 { 412 struct hns3_enet_tqp_vector *tqp_vectors; 413 int txrx_int_idx = 0; 414 int rx_int_idx = 0; 415 int tx_int_idx = 0; 416 unsigned int i; 417 int ret; 418 419 for (i = 0; i < priv->vector_num; i++) { 420 tqp_vectors = &priv->tqp_vector[i]; 421 422 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) 423 continue; 424 425 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { 426 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, 427 "%s-%s-%s-%d", hns3_driver_name, 428 pci_name(priv->ae_handle->pdev), 429 "TxRx", txrx_int_idx++); 430 txrx_int_idx++; 431 } else if (tqp_vectors->rx_group.ring) { 432 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, 433 "%s-%s-%s-%d", hns3_driver_name, 434 pci_name(priv->ae_handle->pdev), 435 "Rx", rx_int_idx++); 436 } else if (tqp_vectors->tx_group.ring) { 437 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, 438 "%s-%s-%s-%d", hns3_driver_name, 439 pci_name(priv->ae_handle->pdev), 440 "Tx", tx_int_idx++); 441 } else { 442 /* Skip this unused q_vector */ 443 continue; 444 } 445 446 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; 447 448 irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN); 449 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, 450 tqp_vectors->name, tqp_vectors); 451 if (ret) { 452 netdev_err(priv->netdev, "request irq(%d) fail\n", 453 tqp_vectors->vector_irq); 454 hns3_nic_uninit_irq(priv); 455 return ret; 456 } 457 458 irq_set_affinity_hint(tqp_vectors->vector_irq, 459 &tqp_vectors->affinity_mask); 460 461 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; 462 } 463 464 return 0; 465 } 466 467 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, 468 u32 mask_en) 469 { 470 writel(mask_en, tqp_vector->mask_addr); 471 } 472 473 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) 474 { 475 napi_enable(&tqp_vector->napi); 476 enable_irq(tqp_vector->vector_irq); 477 478 /* enable vector */ 479 hns3_mask_vector_irq(tqp_vector, 1); 480 } 481 482 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) 483 { 484 /* disable vector */ 485 hns3_mask_vector_irq(tqp_vector, 0); 486 487 disable_irq(tqp_vector->vector_irq); 488 napi_disable(&tqp_vector->napi); 489 cancel_work_sync(&tqp_vector->rx_group.dim.work); 490 cancel_work_sync(&tqp_vector->tx_group.dim.work); 491 } 492 493 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, 494 u32 rl_value) 495 { 496 u32 rl_reg = hns3_rl_usec_to_reg(rl_value); 497 498 /* this defines the configuration for RL (Interrupt Rate Limiter). 499 * Rl defines rate of interrupts i.e. 
number of interrupts-per-second 500 * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing 501 */ 502 if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable && 503 !tqp_vector->rx_group.coal.adapt_enable) 504 /* According to the hardware, the range of rl_reg is 505 * 0-59 and the unit is 4. 506 */ 507 rl_reg |= HNS3_INT_RL_ENABLE_MASK; 508 509 writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); 510 } 511 512 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector, 513 u32 gl_value) 514 { 515 u32 new_val; 516 517 if (tqp_vector->rx_group.coal.unit_1us) 518 new_val = gl_value | HNS3_INT_GL_1US; 519 else 520 new_val = hns3_gl_usec_to_reg(gl_value); 521 522 writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); 523 } 524 525 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, 526 u32 gl_value) 527 { 528 u32 new_val; 529 530 if (tqp_vector->tx_group.coal.unit_1us) 531 new_val = gl_value | HNS3_INT_GL_1US; 532 else 533 new_val = hns3_gl_usec_to_reg(gl_value); 534 535 writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); 536 } 537 538 void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector, 539 u32 ql_value) 540 { 541 writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET); 542 } 543 544 void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector, 545 u32 ql_value) 546 { 547 writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET); 548 } 549 550 static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector, 551 struct hns3_nic_priv *priv) 552 { 553 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); 554 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; 555 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; 556 struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal; 557 struct hns3_enet_coalesce *prx_coal = &priv->rx_coal; 558 559 tx_coal->adapt_enable = ptx_coal->adapt_enable; 560 rx_coal->adapt_enable = prx_coal->adapt_enable; 561 562 tx_coal->int_gl = ptx_coal->int_gl; 563 rx_coal->int_gl = prx_coal->int_gl; 564 565 rx_coal->flow_level = prx_coal->flow_level; 566 tx_coal->flow_level = ptx_coal->flow_level; 567 568 /* device version above V3(include V3), GL can configure 1us 569 * unit, so uses 1us unit. 
570 */ 571 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { 572 tx_coal->unit_1us = 1; 573 rx_coal->unit_1us = 1; 574 } 575 576 if (ae_dev->dev_specs.int_ql_max) { 577 tx_coal->ql_enable = 1; 578 rx_coal->ql_enable = 1; 579 tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; 580 rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; 581 tx_coal->int_ql = ptx_coal->int_ql; 582 rx_coal->int_ql = prx_coal->int_ql; 583 } 584 } 585 586 static void 587 hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector, 588 struct hns3_nic_priv *priv) 589 { 590 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; 591 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; 592 struct hnae3_handle *h = priv->ae_handle; 593 594 hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl); 595 hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl); 596 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); 597 598 if (tx_coal->ql_enable) 599 hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql); 600 601 if (rx_coal->ql_enable) 602 hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql); 603 } 604 605 static int hns3_nic_set_real_num_queue(struct net_device *netdev) 606 { 607 struct hnae3_handle *h = hns3_get_handle(netdev); 608 struct hnae3_knic_private_info *kinfo = &h->kinfo; 609 struct hnae3_tc_info *tc_info = &kinfo->tc_info; 610 unsigned int queue_size = kinfo->num_tqps; 611 int i, ret; 612 613 if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) { 614 netdev_reset_tc(netdev); 615 } else { 616 ret = netdev_set_num_tc(netdev, tc_info->num_tc); 617 if (ret) { 618 netdev_err(netdev, 619 "netdev_set_num_tc fail, ret=%d!\n", ret); 620 return ret; 621 } 622 623 for (i = 0; i < tc_info->num_tc; i++) 624 netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i], 625 tc_info->tqp_offset[i]); 626 } 627 628 ret = netif_set_real_num_tx_queues(netdev, queue_size); 629 if (ret) { 630 netdev_err(netdev, 631 "netif_set_real_num_tx_queues fail, ret=%d!\n", ret); 632 return ret; 633 } 634 635 ret = netif_set_real_num_rx_queues(netdev, queue_size); 636 if (ret) { 637 netdev_err(netdev, 638 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); 639 return ret; 640 } 641 642 return 0; 643 } 644 645 u16 hns3_get_max_available_channels(struct hnae3_handle *h) 646 { 647 u16 alloc_tqps, max_rss_size, rss_size; 648 649 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); 650 rss_size = alloc_tqps / h->kinfo.tc_info.num_tc; 651 652 return min_t(u16, rss_size, max_rss_size); 653 } 654 655 static void hns3_tqp_enable(struct hnae3_queue *tqp) 656 { 657 u32 rcb_reg; 658 659 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); 660 rcb_reg |= BIT(HNS3_RING_EN_B); 661 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); 662 } 663 664 static void hns3_tqp_disable(struct hnae3_queue *tqp) 665 { 666 u32 rcb_reg; 667 668 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); 669 rcb_reg &= ~BIT(HNS3_RING_EN_B); 670 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); 671 } 672 673 static void hns3_free_rx_cpu_rmap(struct net_device *netdev) 674 { 675 #ifdef CONFIG_RFS_ACCEL 676 free_irq_cpu_rmap(netdev->rx_cpu_rmap); 677 netdev->rx_cpu_rmap = NULL; 678 #endif 679 } 680 681 static int hns3_set_rx_cpu_rmap(struct net_device *netdev) 682 { 683 #ifdef CONFIG_RFS_ACCEL 684 struct hns3_nic_priv *priv = netdev_priv(netdev); 685 struct hns3_enet_tqp_vector *tqp_vector; 686 int i, ret; 687 688 if (!netdev->rx_cpu_rmap) { 689 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num); 690 if 
(!netdev->rx_cpu_rmap) 691 return -ENOMEM; 692 } 693 694 for (i = 0; i < priv->vector_num; i++) { 695 tqp_vector = &priv->tqp_vector[i]; 696 ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap, 697 tqp_vector->vector_irq); 698 if (ret) { 699 hns3_free_rx_cpu_rmap(netdev); 700 return ret; 701 } 702 } 703 #endif 704 return 0; 705 } 706 707 static int hns3_nic_net_up(struct net_device *netdev) 708 { 709 struct hns3_nic_priv *priv = netdev_priv(netdev); 710 struct hnae3_handle *h = priv->ae_handle; 711 int i, j; 712 int ret; 713 714 ret = hns3_nic_reset_all_ring(h); 715 if (ret) 716 return ret; 717 718 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); 719 720 /* enable the vectors */ 721 for (i = 0; i < priv->vector_num; i++) 722 hns3_vector_enable(&priv->tqp_vector[i]); 723 724 /* enable rcb */ 725 for (j = 0; j < h->kinfo.num_tqps; j++) 726 hns3_tqp_enable(h->kinfo.tqp[j]); 727 728 /* start the ae_dev */ 729 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; 730 if (ret) { 731 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); 732 while (j--) 733 hns3_tqp_disable(h->kinfo.tqp[j]); 734 735 for (j = i - 1; j >= 0; j--) 736 hns3_vector_disable(&priv->tqp_vector[j]); 737 } 738 739 return ret; 740 } 741 742 static void hns3_config_xps(struct hns3_nic_priv *priv) 743 { 744 int i; 745 746 for (i = 0; i < priv->vector_num; i++) { 747 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; 748 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; 749 750 while (ring) { 751 int ret; 752 753 ret = netif_set_xps_queue(priv->netdev, 754 &tqp_vector->affinity_mask, 755 ring->tqp->tqp_index); 756 if (ret) 757 netdev_warn(priv->netdev, 758 "set xps queue failed: %d", ret); 759 760 ring = ring->next; 761 } 762 } 763 } 764 765 static int hns3_nic_net_open(struct net_device *netdev) 766 { 767 struct hns3_nic_priv *priv = netdev_priv(netdev); 768 struct hnae3_handle *h = hns3_get_handle(netdev); 769 struct hnae3_knic_private_info *kinfo; 770 int i, ret; 771 772 if (hns3_nic_resetting(netdev)) 773 return -EBUSY; 774 775 if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 776 netdev_warn(netdev, "net open repeatedly!\n"); 777 return 0; 778 } 779 780 netif_carrier_off(netdev); 781 782 ret = hns3_nic_set_real_num_queue(netdev); 783 if (ret) 784 return ret; 785 786 ret = hns3_nic_net_up(netdev); 787 if (ret) { 788 netdev_err(netdev, "net up fail, ret=%d!\n", ret); 789 return ret; 790 } 791 792 kinfo = &h->kinfo; 793 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) 794 netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]); 795 796 if (h->ae_algo->ops->set_timer_task) 797 h->ae_algo->ops->set_timer_task(priv->ae_handle, true); 798 799 hns3_config_xps(priv); 800 801 netif_dbg(h, drv, netdev, "net open\n"); 802 803 return 0; 804 } 805 806 static void hns3_reset_tx_queue(struct hnae3_handle *h) 807 { 808 struct net_device *ndev = h->kinfo.netdev; 809 struct hns3_nic_priv *priv = netdev_priv(ndev); 810 struct netdev_queue *dev_queue; 811 u32 i; 812 813 for (i = 0; i < h->kinfo.num_tqps; i++) { 814 dev_queue = netdev_get_tx_queue(ndev, 815 priv->ring[i].queue_index); 816 netdev_tx_reset_queue(dev_queue); 817 } 818 } 819 820 static void hns3_nic_net_down(struct net_device *netdev) 821 { 822 struct hns3_nic_priv *priv = netdev_priv(netdev); 823 struct hnae3_handle *h = hns3_get_handle(netdev); 824 const struct hnae3_ae_ops *ops; 825 int i; 826 827 /* disable vectors */ 828 for (i = 0; i < priv->vector_num; i++) 829 hns3_vector_disable(&priv->tqp_vector[i]); 830 831 /* disable rcb */ 832 for (i = 0; i < h->kinfo.num_tqps; 
i++) 833 hns3_tqp_disable(h->kinfo.tqp[i]); 834 835 /* stop ae_dev */ 836 ops = priv->ae_handle->ae_algo->ops; 837 if (ops->stop) 838 ops->stop(priv->ae_handle); 839 840 /* delay ring buffer clearing to hns3_reset_notify_uninit_enet 841 * during reset process, because driver may not be able 842 * to disable the ring through firmware when downing the netdev. 843 */ 844 if (!hns3_nic_resetting(netdev)) 845 hns3_clear_all_ring(priv->ae_handle, false); 846 847 hns3_reset_tx_queue(priv->ae_handle); 848 } 849 850 static int hns3_nic_net_stop(struct net_device *netdev) 851 { 852 struct hns3_nic_priv *priv = netdev_priv(netdev); 853 struct hnae3_handle *h = hns3_get_handle(netdev); 854 855 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 856 return 0; 857 858 netif_dbg(h, drv, netdev, "net stop\n"); 859 860 if (h->ae_algo->ops->set_timer_task) 861 h->ae_algo->ops->set_timer_task(priv->ae_handle, false); 862 863 netif_carrier_off(netdev); 864 netif_tx_disable(netdev); 865 866 hns3_nic_net_down(netdev); 867 868 return 0; 869 } 870 871 static int hns3_nic_uc_sync(struct net_device *netdev, 872 const unsigned char *addr) 873 { 874 struct hnae3_handle *h = hns3_get_handle(netdev); 875 876 if (h->ae_algo->ops->add_uc_addr) 877 return h->ae_algo->ops->add_uc_addr(h, addr); 878 879 return 0; 880 } 881 882 static int hns3_nic_uc_unsync(struct net_device *netdev, 883 const unsigned char *addr) 884 { 885 struct hnae3_handle *h = hns3_get_handle(netdev); 886 887 /* need ignore the request of removing device address, because 888 * we store the device address and other addresses of uc list 889 * in the function's mac filter list. 890 */ 891 if (ether_addr_equal(addr, netdev->dev_addr)) 892 return 0; 893 894 if (h->ae_algo->ops->rm_uc_addr) 895 return h->ae_algo->ops->rm_uc_addr(h, addr); 896 897 return 0; 898 } 899 900 static int hns3_nic_mc_sync(struct net_device *netdev, 901 const unsigned char *addr) 902 { 903 struct hnae3_handle *h = hns3_get_handle(netdev); 904 905 if (h->ae_algo->ops->add_mc_addr) 906 return h->ae_algo->ops->add_mc_addr(h, addr); 907 908 return 0; 909 } 910 911 static int hns3_nic_mc_unsync(struct net_device *netdev, 912 const unsigned char *addr) 913 { 914 struct hnae3_handle *h = hns3_get_handle(netdev); 915 916 if (h->ae_algo->ops->rm_mc_addr) 917 return h->ae_algo->ops->rm_mc_addr(h, addr); 918 919 return 0; 920 } 921 922 static u8 hns3_get_netdev_flags(struct net_device *netdev) 923 { 924 u8 flags = 0; 925 926 if (netdev->flags & IFF_PROMISC) 927 flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE; 928 else if (netdev->flags & IFF_ALLMULTI) 929 flags = HNAE3_USER_MPE; 930 931 return flags; 932 } 933 934 static void hns3_nic_set_rx_mode(struct net_device *netdev) 935 { 936 struct hnae3_handle *h = hns3_get_handle(netdev); 937 u8 new_flags; 938 939 new_flags = hns3_get_netdev_flags(netdev); 940 941 __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); 942 __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync); 943 944 /* User mode Promisc mode enable and vlan filtering is disabled to 945 * let all packets in. 
946 */ 947 h->netdev_flags = new_flags; 948 hns3_request_update_promisc_mode(h); 949 } 950 951 void hns3_request_update_promisc_mode(struct hnae3_handle *handle) 952 { 953 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; 954 955 if (ops->request_update_promisc_mode) 956 ops->request_update_promisc_mode(handle); 957 } 958 959 static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring) 960 { 961 struct hns3_tx_spare *tx_spare = ring->tx_spare; 962 u32 ntc, ntu; 963 964 /* This smp_load_acquire() pairs with smp_store_release() in 965 * hns3_tx_spare_update() called in tx desc cleaning process. 966 */ 967 ntc = smp_load_acquire(&tx_spare->last_to_clean); 968 ntu = tx_spare->next_to_use; 969 970 if (ntc > ntu) 971 return ntc - ntu - 1; 972 973 /* The free tx buffer is divided into two part, so pick the 974 * larger one. 975 */ 976 return max(ntc, tx_spare->len - ntu) - 1; 977 } 978 979 static void hns3_tx_spare_update(struct hns3_enet_ring *ring) 980 { 981 struct hns3_tx_spare *tx_spare = ring->tx_spare; 982 983 if (!tx_spare || 984 tx_spare->last_to_clean == tx_spare->next_to_clean) 985 return; 986 987 /* This smp_store_release() pairs with smp_load_acquire() in 988 * hns3_tx_spare_space() called in xmit process. 989 */ 990 smp_store_release(&tx_spare->last_to_clean, 991 tx_spare->next_to_clean); 992 } 993 994 static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring, 995 struct sk_buff *skb, 996 u32 space) 997 { 998 u32 len = skb->len <= ring->tx_copybreak ? skb->len : 999 skb_headlen(skb); 1000 1001 if (len > ring->tx_copybreak) 1002 return false; 1003 1004 if (ALIGN(len, dma_get_cache_alignment()) > space) { 1005 hns3_ring_stats_update(ring, tx_spare_full); 1006 return false; 1007 } 1008 1009 return true; 1010 } 1011 1012 static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring, 1013 struct sk_buff *skb, 1014 u32 space) 1015 { 1016 if (skb->len <= ring->tx_copybreak || !tx_sgl || 1017 (!skb_has_frag_list(skb) && 1018 skb_shinfo(skb)->nr_frags < tx_sgl)) 1019 return false; 1020 1021 if (space < HNS3_MAX_SGL_SIZE) { 1022 hns3_ring_stats_update(ring, tx_spare_full); 1023 return false; 1024 } 1025 1026 return true; 1027 } 1028 1029 static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) 1030 { 1031 struct hns3_tx_spare *tx_spare; 1032 struct page *page; 1033 u32 alloc_size; 1034 dma_addr_t dma; 1035 int order; 1036 1037 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; 1038 if (!alloc_size) 1039 return; 1040 1041 order = get_order(alloc_size); 1042 tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare), 1043 GFP_KERNEL); 1044 if (!tx_spare) { 1045 /* The driver still work without the tx spare buffer */ 1046 dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n"); 1047 return; 1048 } 1049 1050 page = alloc_pages_node(dev_to_node(ring_to_dev(ring)), 1051 GFP_KERNEL, order); 1052 if (!page) { 1053 dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n"); 1054 devm_kfree(ring_to_dev(ring), tx_spare); 1055 return; 1056 } 1057 1058 dma = dma_map_page(ring_to_dev(ring), page, 0, 1059 PAGE_SIZE << order, DMA_TO_DEVICE); 1060 if (dma_mapping_error(ring_to_dev(ring), dma)) { 1061 dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n"); 1062 put_page(page); 1063 devm_kfree(ring_to_dev(ring), tx_spare); 1064 return; 1065 } 1066 1067 tx_spare->dma = dma; 1068 tx_spare->buf = page_address(page); 1069 tx_spare->len = PAGE_SIZE << order; 1070 ring->tx_spare = tx_spare; 1071 } 1072 1073 /* Use hns3_tx_spare_space() to make sure there is enough 
buffer 1074 * before calling below function to allocate tx buffer. 1075 */ 1076 static void *hns3_tx_spare_alloc(struct hns3_enet_ring *ring, 1077 unsigned int size, dma_addr_t *dma, 1078 u32 *cb_len) 1079 { 1080 struct hns3_tx_spare *tx_spare = ring->tx_spare; 1081 u32 ntu = tx_spare->next_to_use; 1082 1083 size = ALIGN(size, dma_get_cache_alignment()); 1084 *cb_len = size; 1085 1086 /* Tx spare buffer wraps back here because the end of 1087 * freed tx buffer is not enough. 1088 */ 1089 if (ntu + size > tx_spare->len) { 1090 *cb_len += (tx_spare->len - ntu); 1091 ntu = 0; 1092 } 1093 1094 tx_spare->next_to_use = ntu + size; 1095 if (tx_spare->next_to_use == tx_spare->len) 1096 tx_spare->next_to_use = 0; 1097 1098 *dma = tx_spare->dma + ntu; 1099 1100 return tx_spare->buf + ntu; 1101 } 1102 1103 static void hns3_tx_spare_rollback(struct hns3_enet_ring *ring, u32 len) 1104 { 1105 struct hns3_tx_spare *tx_spare = ring->tx_spare; 1106 1107 if (len > tx_spare->next_to_use) { 1108 len -= tx_spare->next_to_use; 1109 tx_spare->next_to_use = tx_spare->len - len; 1110 } else { 1111 tx_spare->next_to_use -= len; 1112 } 1113 } 1114 1115 static void hns3_tx_spare_reclaim_cb(struct hns3_enet_ring *ring, 1116 struct hns3_desc_cb *cb) 1117 { 1118 struct hns3_tx_spare *tx_spare = ring->tx_spare; 1119 u32 ntc = tx_spare->next_to_clean; 1120 u32 len = cb->length; 1121 1122 tx_spare->next_to_clean += len; 1123 1124 if (tx_spare->next_to_clean >= tx_spare->len) { 1125 tx_spare->next_to_clean -= tx_spare->len; 1126 1127 if (tx_spare->next_to_clean) { 1128 ntc = 0; 1129 len = tx_spare->next_to_clean; 1130 } 1131 } 1132 1133 /* This tx spare buffer is only really reclaimed after calling 1134 * hns3_tx_spare_update(), so it is still safe to use the info in 1135 * the tx buffer to do the dma sync or sg unmapping after 1136 * tx_spare->next_to_clean is moved forword. 1137 */ 1138 if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) { 1139 dma_addr_t dma = tx_spare->dma + ntc; 1140 1141 dma_sync_single_for_cpu(ring_to_dev(ring), dma, len, 1142 DMA_TO_DEVICE); 1143 } else { 1144 struct sg_table *sgt = tx_spare->buf + ntc; 1145 1146 dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, 1147 DMA_TO_DEVICE); 1148 } 1149 } 1150 1151 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs, 1152 u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes) 1153 { 1154 u32 l4_offset, hdr_len; 1155 union l3_hdr_info l3; 1156 union l4_hdr_info l4; 1157 u32 l4_paylen; 1158 int ret; 1159 1160 if (!skb_is_gso(skb)) 1161 return 0; 1162 1163 ret = skb_cow_head(skb, 0); 1164 if (unlikely(ret < 0)) 1165 return ret; 1166 1167 l3.hdr = skb_network_header(skb); 1168 l4.hdr = skb_transport_header(skb); 1169 1170 /* Software should clear the IPv4's checksum field when tso is 1171 * needed. 1172 */ 1173 if (l3.v4->version == 4) 1174 l3.v4->check = 0; 1175 1176 /* tunnel packet */ 1177 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 1178 SKB_GSO_GRE_CSUM | 1179 SKB_GSO_UDP_TUNNEL | 1180 SKB_GSO_UDP_TUNNEL_CSUM)) { 1181 /* reset l3&l4 pointers from outer to inner headers */ 1182 l3.hdr = skb_inner_network_header(skb); 1183 l4.hdr = skb_inner_transport_header(skb); 1184 1185 /* Software should clear the IPv4's checksum field when 1186 * tso is needed. 
1187 */ 1188 if (l3.v4->version == 4) 1189 l3.v4->check = 0; 1190 } 1191 1192 /* normal or tunnel packet */ 1193 l4_offset = l4.hdr - skb->data; 1194 1195 /* remove payload length from inner pseudo checksum when tso */ 1196 l4_paylen = skb->len - l4_offset; 1197 1198 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 1199 hdr_len = sizeof(*l4.udp) + l4_offset; 1200 csum_replace_by_diff(&l4.udp->check, 1201 (__force __wsum)htonl(l4_paylen)); 1202 } else { 1203 hdr_len = (l4.tcp->doff << 2) + l4_offset; 1204 csum_replace_by_diff(&l4.tcp->check, 1205 (__force __wsum)htonl(l4_paylen)); 1206 } 1207 1208 *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len; 1209 1210 /* find the txbd field values */ 1211 *paylen_fdop_ol4cs = skb->len - hdr_len; 1212 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); 1213 1214 /* offload outer UDP header checksum */ 1215 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) 1216 hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1); 1217 1218 /* get MSS for TSO */ 1219 *mss = skb_shinfo(skb)->gso_size; 1220 1221 trace_hns3_tso(skb); 1222 1223 return 0; 1224 } 1225 1226 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, 1227 u8 *il4_proto) 1228 { 1229 union l3_hdr_info l3; 1230 unsigned char *l4_hdr; 1231 unsigned char *exthdr; 1232 u8 l4_proto_tmp; 1233 __be16 frag_off; 1234 1235 /* find outer header point */ 1236 l3.hdr = skb_network_header(skb); 1237 l4_hdr = skb_transport_header(skb); 1238 1239 if (skb->protocol == htons(ETH_P_IPV6)) { 1240 exthdr = l3.hdr + sizeof(*l3.v6); 1241 l4_proto_tmp = l3.v6->nexthdr; 1242 if (l4_hdr != exthdr) 1243 ipv6_skip_exthdr(skb, exthdr - skb->data, 1244 &l4_proto_tmp, &frag_off); 1245 } else if (skb->protocol == htons(ETH_P_IP)) { 1246 l4_proto_tmp = l3.v4->protocol; 1247 } else { 1248 return -EINVAL; 1249 } 1250 1251 *ol4_proto = l4_proto_tmp; 1252 1253 /* tunnel packet */ 1254 if (!skb->encapsulation) { 1255 *il4_proto = 0; 1256 return 0; 1257 } 1258 1259 /* find inner header point */ 1260 l3.hdr = skb_inner_network_header(skb); 1261 l4_hdr = skb_inner_transport_header(skb); 1262 1263 if (l3.v6->version == 6) { 1264 exthdr = l3.hdr + sizeof(*l3.v6); 1265 l4_proto_tmp = l3.v6->nexthdr; 1266 if (l4_hdr != exthdr) 1267 ipv6_skip_exthdr(skb, exthdr - skb->data, 1268 &l4_proto_tmp, &frag_off); 1269 } else if (l3.v4->version == 4) { 1270 l4_proto_tmp = l3.v4->protocol; 1271 } 1272 1273 *il4_proto = l4_proto_tmp; 1274 1275 return 0; 1276 } 1277 1278 /* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL 1279 * and it is udp packet, which has a dest port as the IANA assigned. 1280 * the hardware is expected to do the checksum offload, but the 1281 * hardware will not do the checksum offload when udp dest port is 1282 * 4789, 4790 or 6081. 1283 */ 1284 static bool hns3_tunnel_csum_bug(struct sk_buff *skb) 1285 { 1286 struct hns3_nic_priv *priv = netdev_priv(skb->dev); 1287 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); 1288 union l4_hdr_info l4; 1289 1290 /* device version above V3(include V3), the hardware can 1291 * do this checksum offload. 
1292 */ 1293 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) 1294 return false; 1295 1296 l4.hdr = skb_transport_header(skb); 1297 1298 if (!(!skb->encapsulation && 1299 (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || 1300 l4.udp->dest == htons(GENEVE_UDP_PORT) || 1301 l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT)))) 1302 return false; 1303 1304 return true; 1305 } 1306 1307 static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, 1308 u32 *ol_type_vlan_len_msec) 1309 { 1310 u32 l2_len, l3_len, l4_len; 1311 unsigned char *il2_hdr; 1312 union l3_hdr_info l3; 1313 union l4_hdr_info l4; 1314 1315 l3.hdr = skb_network_header(skb); 1316 l4.hdr = skb_transport_header(skb); 1317 1318 /* compute OL2 header size, defined in 2 Bytes */ 1319 l2_len = l3.hdr - skb->data; 1320 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1); 1321 1322 /* compute OL3 header size, defined in 4 Bytes */ 1323 l3_len = l4.hdr - l3.hdr; 1324 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2); 1325 1326 il2_hdr = skb_inner_mac_header(skb); 1327 /* compute OL4 header size, defined in 4 Bytes */ 1328 l4_len = il2_hdr - l4.hdr; 1329 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2); 1330 1331 /* define outer network header type */ 1332 if (skb->protocol == htons(ETH_P_IP)) { 1333 if (skb_is_gso(skb)) 1334 hns3_set_field(*ol_type_vlan_len_msec, 1335 HNS3_TXD_OL3T_S, 1336 HNS3_OL3T_IPV4_CSUM); 1337 else 1338 hns3_set_field(*ol_type_vlan_len_msec, 1339 HNS3_TXD_OL3T_S, 1340 HNS3_OL3T_IPV4_NO_CSUM); 1341 } else if (skb->protocol == htons(ETH_P_IPV6)) { 1342 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, 1343 HNS3_OL3T_IPV6); 1344 } 1345 1346 if (ol4_proto == IPPROTO_UDP) 1347 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, 1348 HNS3_TUN_MAC_IN_UDP); 1349 else if (ol4_proto == IPPROTO_GRE) 1350 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, 1351 HNS3_TUN_NVGRE); 1352 } 1353 1354 static void hns3_set_l3_type(struct sk_buff *skb, union l3_hdr_info l3, 1355 u32 *type_cs_vlan_tso) 1356 { 1357 if (l3.v4->version == 4) { 1358 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, 1359 HNS3_L3T_IPV4); 1360 1361 /* the stack computes the IP header already, the only time we 1362 * need the hardware to recompute it is in the case of TSO. 1363 */ 1364 if (skb_is_gso(skb)) 1365 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); 1366 } else if (l3.v6->version == 6) { 1367 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, 1368 HNS3_L3T_IPV6); 1369 } 1370 } 1371 1372 static int hns3_set_l4_csum_length(struct sk_buff *skb, union l4_hdr_info l4, 1373 u32 l4_proto, u32 *type_cs_vlan_tso) 1374 { 1375 /* compute inner(/normal) L4 header size, defined in 4 Bytes */ 1376 switch (l4_proto) { 1377 case IPPROTO_TCP: 1378 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 1379 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 1380 HNS3_L4T_TCP); 1381 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 1382 l4.tcp->doff); 1383 break; 1384 case IPPROTO_UDP: 1385 if (hns3_tunnel_csum_bug(skb)) { 1386 int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN); 1387 1388 return ret ? 
ret : skb_checksum_help(skb); 1389 } 1390 1391 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 1392 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 1393 HNS3_L4T_UDP); 1394 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 1395 (sizeof(struct udphdr) >> 2)); 1396 break; 1397 case IPPROTO_SCTP: 1398 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 1399 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 1400 HNS3_L4T_SCTP); 1401 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 1402 (sizeof(struct sctphdr) >> 2)); 1403 break; 1404 default: 1405 /* drop the skb tunnel packet if hardware don't support, 1406 * because hardware can't calculate csum when TSO. 1407 */ 1408 if (skb_is_gso(skb)) 1409 return -EDOM; 1410 1411 /* the stack computes the IP header already, 1412 * driver calculate l4 checksum when not TSO. 1413 */ 1414 return skb_checksum_help(skb); 1415 } 1416 1417 return 0; 1418 } 1419 1420 static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, 1421 u8 il4_proto, u32 *type_cs_vlan_tso, 1422 u32 *ol_type_vlan_len_msec) 1423 { 1424 unsigned char *l2_hdr = skb->data; 1425 u32 l4_proto = ol4_proto; 1426 union l4_hdr_info l4; 1427 union l3_hdr_info l3; 1428 u32 l2_len, l3_len; 1429 1430 l4.hdr = skb_transport_header(skb); 1431 l3.hdr = skb_network_header(skb); 1432 1433 /* handle encapsulation skb */ 1434 if (skb->encapsulation) { 1435 /* If this is a not UDP/GRE encapsulation skb */ 1436 if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { 1437 /* drop the skb tunnel packet if hardware don't support, 1438 * because hardware can't calculate csum when TSO. 1439 */ 1440 if (skb_is_gso(skb)) 1441 return -EDOM; 1442 1443 /* the stack computes the IP header already, 1444 * driver calculate l4 checksum when not TSO. 1445 */ 1446 return skb_checksum_help(skb); 1447 } 1448 1449 hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); 1450 1451 /* switch to inner header */ 1452 l2_hdr = skb_inner_mac_header(skb); 1453 l3.hdr = skb_inner_network_header(skb); 1454 l4.hdr = skb_inner_transport_header(skb); 1455 l4_proto = il4_proto; 1456 } 1457 1458 hns3_set_l3_type(skb, l3, type_cs_vlan_tso); 1459 1460 /* compute inner(/normal) L2 header size, defined in 2 Bytes */ 1461 l2_len = l3.hdr - l2_hdr; 1462 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); 1463 1464 /* compute inner(/normal) L3 header size, defined in 4 Bytes */ 1465 l3_len = l4.hdr - l3.hdr; 1466 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); 1467 1468 return hns3_set_l4_csum_length(skb, l4, l4_proto, type_cs_vlan_tso); 1469 } 1470 1471 static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, 1472 struct sk_buff *skb) 1473 { 1474 struct hnae3_handle *handle = tx_ring->tqp->handle; 1475 struct hnae3_ae_dev *ae_dev; 1476 struct vlan_ethhdr *vhdr; 1477 int rc; 1478 1479 if (!(skb->protocol == htons(ETH_P_8021Q) || 1480 skb_vlan_tag_present(skb))) 1481 return 0; 1482 1483 /* For HW limitation on HNAE3_DEVICE_VERSION_V2, if port based insert 1484 * VLAN enabled, only one VLAN header is allowed in skb, otherwise it 1485 * will cause RAS error. 
1486 */ 1487 ae_dev = pci_get_drvdata(handle->pdev); 1488 if (unlikely(skb_vlan_tagged_multi(skb) && 1489 ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && 1490 handle->port_base_vlan_state == 1491 HNAE3_PORT_BASE_VLAN_ENABLE)) 1492 return -EINVAL; 1493 1494 if (skb->protocol == htons(ETH_P_8021Q) && 1495 !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { 1496 /* When HW VLAN acceleration is turned off, and the stack 1497 * sets the protocol to 802.1q, the driver just need to 1498 * set the protocol to the encapsulated ethertype. 1499 */ 1500 skb->protocol = vlan_get_protocol(skb); 1501 return 0; 1502 } 1503 1504 if (skb_vlan_tag_present(skb)) { 1505 /* Based on hw strategy, use out_vtag in two layer tag case, 1506 * and use inner_vtag in one tag case. 1507 */ 1508 if (skb->protocol == htons(ETH_P_8021Q) && 1509 handle->port_base_vlan_state == 1510 HNAE3_PORT_BASE_VLAN_DISABLE) 1511 rc = HNS3_OUTER_VLAN_TAG; 1512 else 1513 rc = HNS3_INNER_VLAN_TAG; 1514 1515 skb->protocol = vlan_get_protocol(skb); 1516 return rc; 1517 } 1518 1519 rc = skb_cow_head(skb, 0); 1520 if (unlikely(rc < 0)) 1521 return rc; 1522 1523 vhdr = (struct vlan_ethhdr *)skb->data; 1524 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) 1525 & VLAN_PRIO_MASK); 1526 1527 skb->protocol = vlan_get_protocol(skb); 1528 return 0; 1529 } 1530 1531 /* check if the hardware is capable of checksum offloading */ 1532 static bool hns3_check_hw_tx_csum(struct sk_buff *skb) 1533 { 1534 struct hns3_nic_priv *priv = netdev_priv(skb->dev); 1535 1536 /* Kindly note, due to backward compatibility of the TX descriptor, 1537 * HW checksum of the non-IP packets and GSO packets is handled at 1538 * different place in the following code 1539 */ 1540 if (skb_csum_is_sctp(skb) || skb_is_gso(skb) || 1541 !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state)) 1542 return false; 1543 1544 return true; 1545 } 1546 1547 struct hns3_desc_param { 1548 u32 paylen_ol4cs; 1549 u32 ol_type_vlan_len_msec; 1550 u32 type_cs_vlan_tso; 1551 u16 mss_hw_csum; 1552 u16 inner_vtag; 1553 u16 out_vtag; 1554 }; 1555 1556 static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa) 1557 { 1558 pa->paylen_ol4cs = skb->len; 1559 pa->ol_type_vlan_len_msec = 0; 1560 pa->type_cs_vlan_tso = 0; 1561 pa->mss_hw_csum = 0; 1562 pa->inner_vtag = 0; 1563 pa->out_vtag = 0; 1564 } 1565 1566 static int hns3_handle_vlan_info(struct hns3_enet_ring *ring, 1567 struct sk_buff *skb, 1568 struct hns3_desc_param *param) 1569 { 1570 int ret; 1571 1572 ret = hns3_handle_vtags(ring, skb); 1573 if (unlikely(ret < 0)) { 1574 hns3_ring_stats_update(ring, tx_vlan_err); 1575 return ret; 1576 } else if (ret == HNS3_INNER_VLAN_TAG) { 1577 param->inner_vtag = skb_vlan_tag_get(skb); 1578 param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & 1579 VLAN_PRIO_MASK; 1580 hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); 1581 } else if (ret == HNS3_OUTER_VLAN_TAG) { 1582 param->out_vtag = skb_vlan_tag_get(skb); 1583 param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & 1584 VLAN_PRIO_MASK; 1585 hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, 1586 1); 1587 } 1588 return 0; 1589 } 1590 1591 static int hns3_handle_csum_partial(struct hns3_enet_ring *ring, 1592 struct sk_buff *skb, 1593 struct hns3_desc_cb *desc_cb, 1594 struct hns3_desc_param *param) 1595 { 1596 u8 ol4_proto, il4_proto; 1597 int ret; 1598 1599 if (hns3_check_hw_tx_csum(skb)) { 1600 /* set checksum start and offset, defined in 2 Bytes */ 1601 
hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S, 1602 skb_checksum_start_offset(skb) >> 1); 1603 hns3_set_field(param->ol_type_vlan_len_msec, 1604 HNS3_TXD_CSUM_OFFSET_S, 1605 skb->csum_offset >> 1); 1606 param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); 1607 return 0; 1608 } 1609 1610 skb_reset_mac_len(skb); 1611 1612 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); 1613 if (unlikely(ret < 0)) { 1614 hns3_ring_stats_update(ring, tx_l4_proto_err); 1615 return ret; 1616 } 1617 1618 ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, 1619 ¶m->type_cs_vlan_tso, 1620 ¶m->ol_type_vlan_len_msec); 1621 if (unlikely(ret < 0)) { 1622 hns3_ring_stats_update(ring, tx_l2l3l4_err); 1623 return ret; 1624 } 1625 1626 ret = hns3_set_tso(skb, ¶m->paylen_ol4cs, ¶m->mss_hw_csum, 1627 ¶m->type_cs_vlan_tso, &desc_cb->send_bytes); 1628 if (unlikely(ret < 0)) { 1629 hns3_ring_stats_update(ring, tx_tso_err); 1630 return ret; 1631 } 1632 return 0; 1633 } 1634 1635 static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, 1636 struct sk_buff *skb, struct hns3_desc *desc, 1637 struct hns3_desc_cb *desc_cb) 1638 { 1639 struct hns3_desc_param param; 1640 int ret; 1641 1642 hns3_init_desc_data(skb, ¶m); 1643 ret = hns3_handle_vlan_info(ring, skb, ¶m); 1644 if (unlikely(ret < 0)) 1645 return ret; 1646 1647 desc_cb->send_bytes = skb->len; 1648 1649 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1650 ret = hns3_handle_csum_partial(ring, skb, desc_cb, ¶m); 1651 if (ret) 1652 return ret; 1653 } 1654 1655 /* Set txbd */ 1656 desc->tx.ol_type_vlan_len_msec = 1657 cpu_to_le32(param.ol_type_vlan_len_msec); 1658 desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso); 1659 desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs); 1660 desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum); 1661 desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag); 1662 desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag); 1663 1664 return 0; 1665 } 1666 1667 static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma, 1668 unsigned int size) 1669 { 1670 #define HNS3_LIKELY_BD_NUM 1 1671 1672 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 1673 unsigned int frag_buf_num; 1674 int k, sizeoflast; 1675 1676 if (likely(size <= HNS3_MAX_BD_SIZE)) { 1677 desc->addr = cpu_to_le64(dma); 1678 desc->tx.send_size = cpu_to_le16(size); 1679 desc->tx.bdtp_fe_sc_vld_ra_ri = 1680 cpu_to_le16(BIT(HNS3_TXD_VLD_B)); 1681 1682 trace_hns3_tx_desc(ring, ring->next_to_use); 1683 ring_ptr_move_fw(ring, next_to_use); 1684 return HNS3_LIKELY_BD_NUM; 1685 } 1686 1687 frag_buf_num = hns3_tx_bd_count(size); 1688 sizeoflast = size % HNS3_MAX_BD_SIZE; 1689 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; 1690 1691 /* When frag size is bigger than hardware limit, split this frag */ 1692 for (k = 0; k < frag_buf_num; k++) { 1693 /* now, fill the descriptor */ 1694 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); 1695 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? 
1696 (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE); 1697 desc->tx.bdtp_fe_sc_vld_ra_ri = 1698 cpu_to_le16(BIT(HNS3_TXD_VLD_B)); 1699 1700 trace_hns3_tx_desc(ring, ring->next_to_use); 1701 /* move ring pointer to next */ 1702 ring_ptr_move_fw(ring, next_to_use); 1703 1704 desc = &ring->desc[ring->next_to_use]; 1705 } 1706 1707 return frag_buf_num; 1708 } 1709 1710 static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv, 1711 unsigned int type) 1712 { 1713 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 1714 struct device *dev = ring_to_dev(ring); 1715 unsigned int size; 1716 dma_addr_t dma; 1717 1718 if (type & (DESC_TYPE_FRAGLIST_SKB | DESC_TYPE_SKB)) { 1719 struct sk_buff *skb = (struct sk_buff *)priv; 1720 1721 size = skb_headlen(skb); 1722 if (!size) 1723 return 0; 1724 1725 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); 1726 } else if (type & DESC_TYPE_BOUNCE_HEAD) { 1727 /* Head data has been filled in hns3_handle_tx_bounce(), 1728 * just return 0 here. 1729 */ 1730 return 0; 1731 } else { 1732 skb_frag_t *frag = (skb_frag_t *)priv; 1733 1734 size = skb_frag_size(frag); 1735 if (!size) 1736 return 0; 1737 1738 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); 1739 } 1740 1741 if (unlikely(dma_mapping_error(dev, dma))) { 1742 hns3_ring_stats_update(ring, sw_err_cnt); 1743 return -ENOMEM; 1744 } 1745 1746 desc_cb->priv = priv; 1747 desc_cb->length = size; 1748 desc_cb->dma = dma; 1749 desc_cb->type = type; 1750 1751 return hns3_fill_desc(ring, dma, size); 1752 } 1753 1754 static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size, 1755 unsigned int bd_num) 1756 { 1757 unsigned int size; 1758 int i; 1759 1760 size = skb_headlen(skb); 1761 while (size > HNS3_MAX_BD_SIZE) { 1762 bd_size[bd_num++] = HNS3_MAX_BD_SIZE; 1763 size -= HNS3_MAX_BD_SIZE; 1764 1765 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1766 return bd_num; 1767 } 1768 1769 if (size) { 1770 bd_size[bd_num++] = size; 1771 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1772 return bd_num; 1773 } 1774 1775 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1776 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1777 size = skb_frag_size(frag); 1778 if (!size) 1779 continue; 1780 1781 while (size > HNS3_MAX_BD_SIZE) { 1782 bd_size[bd_num++] = HNS3_MAX_BD_SIZE; 1783 size -= HNS3_MAX_BD_SIZE; 1784 1785 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1786 return bd_num; 1787 } 1788 1789 bd_size[bd_num++] = size; 1790 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1791 return bd_num; 1792 } 1793 1794 return bd_num; 1795 } 1796 1797 static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size, 1798 u8 max_non_tso_bd_num, unsigned int bd_num, 1799 unsigned int recursion_level) 1800 { 1801 #define HNS3_MAX_RECURSION_LEVEL 24 1802 1803 struct sk_buff *frag_skb; 1804 1805 /* If the total len is within the max bd limit */ 1806 if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level && 1807 !skb_has_frag_list(skb) && 1808 skb_shinfo(skb)->nr_frags < max_non_tso_bd_num)) 1809 return skb_shinfo(skb)->nr_frags + 1U; 1810 1811 if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL)) 1812 return UINT_MAX; 1813 1814 bd_num = hns3_skb_bd_num(skb, bd_size, bd_num); 1815 if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM) 1816 return bd_num; 1817 1818 skb_walk_frags(skb, frag_skb) { 1819 bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num, 1820 bd_num, recursion_level + 1); 1821 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1822 return bd_num; 1823 } 1824 1825 return bd_num; 1826 } 1827 1828 static 
unsigned int hns3_gso_hdr_len(struct sk_buff *skb) 1829 { 1830 if (!skb->encapsulation) 1831 return skb_transport_offset(skb) + tcp_hdrlen(skb); 1832 1833 return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); 1834 } 1835 1836 /* HW need every continuous max_non_tso_bd_num buffer data to be larger 1837 * than MSS, we simplify it by ensuring skb_headlen + the first continuous 1838 * max_non_tso_bd_num - 1 frags to be larger than gso header len + mss, 1839 * and the remaining continuous max_non_tso_bd_num - 1 frags to be larger 1840 * than MSS except the last max_non_tso_bd_num - 1 frags. 1841 */ 1842 static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size, 1843 unsigned int bd_num, u8 max_non_tso_bd_num) 1844 { 1845 unsigned int tot_len = 0; 1846 int i; 1847 1848 for (i = 0; i < max_non_tso_bd_num - 1U; i++) 1849 tot_len += bd_size[i]; 1850 1851 /* ensure the first max_non_tso_bd_num frags is greater than 1852 * mss + header 1853 */ 1854 if (tot_len + bd_size[max_non_tso_bd_num - 1U] < 1855 skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb)) 1856 return true; 1857 1858 /* ensure every continuous max_non_tso_bd_num - 1 buffer is greater 1859 * than mss except the last one. 1860 */ 1861 for (i = 0; i < bd_num - max_non_tso_bd_num; i++) { 1862 tot_len -= bd_size[i]; 1863 tot_len += bd_size[i + max_non_tso_bd_num - 1U]; 1864 1865 if (tot_len < skb_shinfo(skb)->gso_size) 1866 return true; 1867 } 1868 1869 return false; 1870 } 1871 1872 void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size) 1873 { 1874 int i; 1875 1876 for (i = 0; i < MAX_SKB_FRAGS; i++) 1877 size[i] = skb_frag_size(&shinfo->frags[i]); 1878 } 1879 1880 static int hns3_skb_linearize(struct hns3_enet_ring *ring, 1881 struct sk_buff *skb, 1882 unsigned int bd_num) 1883 { 1884 /* 'bd_num == UINT_MAX' means the skb' fraglist has a 1885 * recursion level of over HNS3_MAX_RECURSION_LEVEL. 1886 */ 1887 if (bd_num == UINT_MAX) { 1888 hns3_ring_stats_update(ring, over_max_recursion); 1889 return -ENOMEM; 1890 } 1891 1892 /* The skb->len has exceeded the hw limitation, linearization 1893 * will not help. 
1894 */ 1895 if (skb->len > HNS3_MAX_TSO_SIZE || 1896 (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) { 1897 hns3_ring_stats_update(ring, hw_limitation); 1898 return -ENOMEM; 1899 } 1900 1901 if (__skb_linearize(skb)) { 1902 hns3_ring_stats_update(ring, sw_err_cnt); 1903 return -ENOMEM; 1904 } 1905 1906 return 0; 1907 } 1908 1909 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, 1910 struct net_device *netdev, 1911 struct sk_buff *skb) 1912 { 1913 struct hns3_nic_priv *priv = netdev_priv(netdev); 1914 u8 max_non_tso_bd_num = priv->max_non_tso_bd_num; 1915 unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U]; 1916 unsigned int bd_num; 1917 1918 bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0); 1919 if (unlikely(bd_num > max_non_tso_bd_num)) { 1920 if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && 1921 !hns3_skb_need_linearized(skb, bd_size, bd_num, 1922 max_non_tso_bd_num)) { 1923 trace_hns3_over_max_bd(skb); 1924 goto out; 1925 } 1926 1927 if (hns3_skb_linearize(ring, skb, bd_num)) 1928 return -ENOMEM; 1929 1930 bd_num = hns3_tx_bd_count(skb->len); 1931 1932 hns3_ring_stats_update(ring, tx_copy); 1933 } 1934 1935 out: 1936 if (likely(ring_space(ring) >= bd_num)) 1937 return bd_num; 1938 1939 netif_stop_subqueue(netdev, ring->queue_index); 1940 smp_mb(); /* Memory barrier before checking ring_space */ 1941 1942 /* Start queue in case hns3_clean_tx_ring has just made room 1943 * available and has not seen the queue stopped state performed 1944 * by netif_stop_subqueue above. 1945 */ 1946 if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) && 1947 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 1948 netif_start_subqueue(netdev, ring->queue_index); 1949 return bd_num; 1950 } 1951 1952 hns3_ring_stats_update(ring, tx_busy); 1953 1954 return -EBUSY; 1955 } 1956 1957 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) 1958 { 1959 struct device *dev = ring_to_dev(ring); 1960 unsigned int i; 1961 1962 for (i = 0; i < ring->desc_num; i++) { 1963 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 1964 struct hns3_desc_cb *desc_cb; 1965 1966 memset(desc, 0, sizeof(*desc)); 1967 1968 /* check if this is where we started */ 1969 if (ring->next_to_use == next_to_use_orig) 1970 break; 1971 1972 /* rollback one */ 1973 ring_ptr_move_bw(ring, next_to_use); 1974 1975 desc_cb = &ring->desc_cb[ring->next_to_use]; 1976 1977 if (!desc_cb->dma) 1978 continue; 1979 1980 /* unmap the descriptor dma address */ 1981 if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) 1982 dma_unmap_single(dev, desc_cb->dma, desc_cb->length, 1983 DMA_TO_DEVICE); 1984 else if (desc_cb->type & 1985 (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) 1986 hns3_tx_spare_rollback(ring, desc_cb->length); 1987 else if (desc_cb->length) 1988 dma_unmap_page(dev, desc_cb->dma, desc_cb->length, 1989 DMA_TO_DEVICE); 1990 1991 desc_cb->length = 0; 1992 desc_cb->dma = 0; 1993 desc_cb->type = DESC_TYPE_UNKNOWN; 1994 } 1995 } 1996 1997 static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, 1998 struct sk_buff *skb, unsigned int type) 1999 { 2000 struct sk_buff *frag_skb; 2001 int i, ret, bd_num = 0; 2002 2003 ret = hns3_map_and_fill_desc(ring, skb, type); 2004 if (unlikely(ret < 0)) 2005 return ret; 2006 2007 bd_num += ret; 2008 2009 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2010 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2011 2012 ret = hns3_map_and_fill_desc(ring, frag, DESC_TYPE_PAGE); 2013 if (unlikely(ret < 0)) 2014 return ret; 2015 2016 bd_num 
+= ret; 2017 } 2018 2019 skb_walk_frags(skb, frag_skb) { 2020 ret = hns3_fill_skb_to_desc(ring, frag_skb, 2021 DESC_TYPE_FRAGLIST_SKB); 2022 if (unlikely(ret < 0)) 2023 return ret; 2024 2025 bd_num += ret; 2026 } 2027 2028 return bd_num; 2029 } 2030 2031 static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, 2032 bool doorbell) 2033 { 2034 ring->pending_buf += num; 2035 2036 if (!doorbell) { 2037 hns3_ring_stats_update(ring, tx_more); 2038 return; 2039 } 2040 2041 if (!ring->pending_buf) 2042 return; 2043 2044 writel(ring->pending_buf, 2045 ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); 2046 ring->pending_buf = 0; 2047 WRITE_ONCE(ring->last_to_use, ring->next_to_use); 2048 } 2049 2050 static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb, 2051 struct hns3_desc *desc) 2052 { 2053 struct hnae3_handle *h = hns3_get_handle(netdev); 2054 2055 if (!(h->ae_algo->ops->set_tx_hwts_info && 2056 h->ae_algo->ops->set_tx_hwts_info(h, skb))) 2057 return; 2058 2059 desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B)); 2060 } 2061 2062 static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring, 2063 struct sk_buff *skb) 2064 { 2065 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 2066 unsigned int type = DESC_TYPE_BOUNCE_HEAD; 2067 unsigned int size = skb_headlen(skb); 2068 dma_addr_t dma; 2069 int bd_num = 0; 2070 u32 cb_len; 2071 void *buf; 2072 int ret; 2073 2074 if (skb->len <= ring->tx_copybreak) { 2075 size = skb->len; 2076 type = DESC_TYPE_BOUNCE_ALL; 2077 } 2078 2079 /* hns3_can_use_tx_bounce() is called to ensure the below 2080 * function can always return the tx buffer. 2081 */ 2082 buf = hns3_tx_spare_alloc(ring, size, &dma, &cb_len); 2083 2084 ret = skb_copy_bits(skb, 0, buf, size); 2085 if (unlikely(ret < 0)) { 2086 hns3_tx_spare_rollback(ring, cb_len); 2087 hns3_ring_stats_update(ring, copy_bits_err); 2088 return ret; 2089 } 2090 2091 desc_cb->priv = skb; 2092 desc_cb->length = cb_len; 2093 desc_cb->dma = dma; 2094 desc_cb->type = type; 2095 2096 bd_num += hns3_fill_desc(ring, dma, size); 2097 2098 if (type == DESC_TYPE_BOUNCE_HEAD) { 2099 ret = hns3_fill_skb_to_desc(ring, skb, 2100 DESC_TYPE_BOUNCE_HEAD); 2101 if (unlikely(ret < 0)) 2102 return ret; 2103 2104 bd_num += ret; 2105 } 2106 2107 dma_sync_single_for_device(ring_to_dev(ring), dma, size, 2108 DMA_TO_DEVICE); 2109 2110 hns3_ring_stats_update(ring, tx_bounce); 2111 2112 return bd_num; 2113 } 2114 2115 static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, 2116 struct sk_buff *skb) 2117 { 2118 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 2119 u32 nfrag = skb_shinfo(skb)->nr_frags + 1; 2120 struct sg_table *sgt; 2121 int i, bd_num = 0; 2122 dma_addr_t dma; 2123 u32 cb_len; 2124 int nents; 2125 2126 if (skb_has_frag_list(skb)) 2127 nfrag = HNS3_MAX_TSO_BD_NUM; 2128 2129 /* hns3_can_use_tx_sgl() is called to ensure the below 2130 * function can always return the tx buffer. 
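	 * (hns3_handle_desc_filling() only takes this path after checking
	 * hns3_tx_spare_space(), so the allocation from the tx spare
	 * buffer is expected to always succeed at this point)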
2131 */ 2132 sgt = hns3_tx_spare_alloc(ring, HNS3_SGL_SIZE(nfrag), 2133 &dma, &cb_len); 2134 2135 /* scatterlist follows by the sg table */ 2136 sgt->sgl = (struct scatterlist *)(sgt + 1); 2137 sg_init_table(sgt->sgl, nfrag); 2138 nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len); 2139 if (unlikely(nents < 0)) { 2140 hns3_tx_spare_rollback(ring, cb_len); 2141 hns3_ring_stats_update(ring, skb2sgl_err); 2142 return -ENOMEM; 2143 } 2144 2145 sgt->orig_nents = nents; 2146 sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, 2147 DMA_TO_DEVICE); 2148 if (unlikely(!sgt->nents)) { 2149 hns3_tx_spare_rollback(ring, cb_len); 2150 hns3_ring_stats_update(ring, map_sg_err); 2151 return -ENOMEM; 2152 } 2153 2154 desc_cb->priv = skb; 2155 desc_cb->length = cb_len; 2156 desc_cb->dma = dma; 2157 desc_cb->type = DESC_TYPE_SGL_SKB; 2158 2159 for (i = 0; i < sgt->nents; i++) 2160 bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i), 2161 sg_dma_len(sgt->sgl + i)); 2162 hns3_ring_stats_update(ring, tx_sgl); 2163 2164 return bd_num; 2165 } 2166 2167 static int hns3_handle_desc_filling(struct hns3_enet_ring *ring, 2168 struct sk_buff *skb) 2169 { 2170 u32 space; 2171 2172 if (!ring->tx_spare) 2173 goto out; 2174 2175 space = hns3_tx_spare_space(ring); 2176 2177 if (hns3_can_use_tx_sgl(ring, skb, space)) 2178 return hns3_handle_tx_sgl(ring, skb); 2179 2180 if (hns3_can_use_tx_bounce(ring, skb, space)) 2181 return hns3_handle_tx_bounce(ring, skb); 2182 2183 out: 2184 return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); 2185 } 2186 2187 static int hns3_handle_skb_desc(struct hns3_enet_ring *ring, 2188 struct sk_buff *skb, 2189 struct hns3_desc_cb *desc_cb, 2190 int next_to_use_head) 2191 { 2192 int ret; 2193 2194 ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use], 2195 desc_cb); 2196 if (unlikely(ret < 0)) 2197 goto fill_err; 2198 2199 /* 'ret < 0' means filling error, 'ret == 0' means skb->len is 2200 * zero, which is unlikely, and 'ret > 0' means how many tx desc 2201 * need to be notified to the hw. 2202 */ 2203 ret = hns3_handle_desc_filling(ring, skb); 2204 if (likely(ret > 0)) 2205 return ret; 2206 2207 fill_err: 2208 hns3_clear_desc(ring, next_to_use_head); 2209 return ret; 2210 } 2211 2212 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) 2213 { 2214 struct hns3_nic_priv *priv = netdev_priv(netdev); 2215 struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; 2216 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 2217 struct netdev_queue *dev_queue; 2218 int pre_ntu, ret; 2219 bool doorbell; 2220 2221 /* Hardware can only handle short frames above 32 bytes */ 2222 if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) { 2223 hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); 2224 2225 hns3_ring_stats_update(ring, sw_err_cnt); 2226 2227 return NETDEV_TX_OK; 2228 } 2229 2230 /* Prefetch the data used later */ 2231 prefetch(skb->data); 2232 2233 ret = hns3_nic_maybe_stop_tx(ring, netdev, skb); 2234 if (unlikely(ret <= 0)) { 2235 if (ret == -EBUSY) { 2236 hns3_tx_doorbell(ring, 0, true); 2237 return NETDEV_TX_BUSY; 2238 } 2239 2240 hns3_rl_err(netdev, "xmit error: %d!\n", ret); 2241 goto out_err_tx_ok; 2242 } 2243 2244 ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use); 2245 if (unlikely(ret <= 0)) 2246 goto out_err_tx_ok; 2247 2248 pre_ntu = ring->next_to_use ? 
(ring->next_to_use - 1) : 2249 (ring->desc_num - 1); 2250 2251 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) 2252 hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]); 2253 2254 ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= 2255 cpu_to_le16(BIT(HNS3_TXD_FE_B)); 2256 trace_hns3_tx_desc(ring, pre_ntu); 2257 2258 skb_tx_timestamp(skb); 2259 2260 /* Complete translate all packets */ 2261 dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); 2262 doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes, 2263 netdev_xmit_more()); 2264 hns3_tx_doorbell(ring, ret, doorbell); 2265 2266 return NETDEV_TX_OK; 2267 2268 out_err_tx_ok: 2269 dev_kfree_skb_any(skb); 2270 hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); 2271 return NETDEV_TX_OK; 2272 } 2273 2274 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) 2275 { 2276 char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN]; 2277 char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN]; 2278 struct hnae3_handle *h = hns3_get_handle(netdev); 2279 struct sockaddr *mac_addr = p; 2280 int ret; 2281 2282 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) 2283 return -EADDRNOTAVAIL; 2284 2285 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { 2286 hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); 2287 netdev_info(netdev, "already using mac address %s\n", 2288 format_mac_addr_sa); 2289 return 0; 2290 } 2291 2292 /* For VF device, if there is a perm_addr, then the user will not 2293 * be allowed to change the address. 2294 */ 2295 if (!hns3_is_phys_func(h->pdev) && 2296 !is_zero_ether_addr(netdev->perm_addr)) { 2297 hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr); 2298 hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); 2299 netdev_err(netdev, "has permanent MAC %s, user MAC %s not allow\n", 2300 format_mac_addr_perm, format_mac_addr_sa); 2301 return -EPERM; 2302 } 2303 2304 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); 2305 if (ret) { 2306 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); 2307 return ret; 2308 } 2309 2310 eth_hw_addr_set(netdev, mac_addr->sa_data); 2311 2312 return 0; 2313 } 2314 2315 static int hns3_nic_do_ioctl(struct net_device *netdev, 2316 struct ifreq *ifr, int cmd) 2317 { 2318 struct hnae3_handle *h = hns3_get_handle(netdev); 2319 2320 if (!netif_running(netdev)) 2321 return -EINVAL; 2322 2323 if (!h->ae_algo->ops->do_ioctl) 2324 return -EOPNOTSUPP; 2325 2326 return h->ae_algo->ops->do_ioctl(h, ifr, cmd); 2327 } 2328 2329 static int hns3_nic_set_features(struct net_device *netdev, 2330 netdev_features_t features) 2331 { 2332 netdev_features_t changed = netdev->features ^ features; 2333 struct hns3_nic_priv *priv = netdev_priv(netdev); 2334 struct hnae3_handle *h = priv->ae_handle; 2335 bool enable; 2336 int ret; 2337 2338 if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { 2339 enable = !!(features & NETIF_F_GRO_HW); 2340 ret = h->ae_algo->ops->set_gro_en(h, enable); 2341 if (ret) 2342 return ret; 2343 } 2344 2345 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && 2346 h->ae_algo->ops->enable_hw_strip_rxvtag) { 2347 enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); 2348 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); 2349 if (ret) 2350 return ret; 2351 } 2352 2353 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { 2354 enable = !!(features & NETIF_F_NTUPLE); 2355 h->ae_algo->ops->enable_fd(h, enable); 2356 } 2357 2358 if ((netdev->features & NETIF_F_HW_TC) > (features & 
NETIF_F_HW_TC) && 2359 h->ae_algo->ops->cls_flower_active(h)) { 2360 netdev_err(netdev, 2361 "there are offloaded TC filters active, cannot disable HW TC offload"); 2362 return -EINVAL; 2363 } 2364 2365 if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && 2366 h->ae_algo->ops->enable_vlan_filter) { 2367 enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); 2368 ret = h->ae_algo->ops->enable_vlan_filter(h, enable); 2369 if (ret) 2370 return ret; 2371 } 2372 2373 netdev->features = features; 2374 return 0; 2375 } 2376 2377 static netdev_features_t hns3_features_check(struct sk_buff *skb, 2378 struct net_device *dev, 2379 netdev_features_t features) 2380 { 2381 #define HNS3_MAX_HDR_LEN 480U 2382 #define HNS3_MAX_L4_HDR_LEN 60U 2383 2384 size_t len; 2385 2386 if (skb->ip_summed != CHECKSUM_PARTIAL) 2387 return features; 2388 2389 if (skb->encapsulation) 2390 len = skb_inner_transport_header(skb) - skb->data; 2391 else 2392 len = skb_transport_header(skb) - skb->data; 2393 2394 /* Assume L4 is 60 byte as TCP is the only protocol with a 2395 * a flexible value, and it's max len is 60 bytes. 2396 */ 2397 len += HNS3_MAX_L4_HDR_LEN; 2398 2399 /* Hardware only supports checksum on the skb with a max header 2400 * len of 480 bytes. 2401 */ 2402 if (len > HNS3_MAX_HDR_LEN) 2403 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 2404 2405 return features; 2406 } 2407 2408 static void hns3_fetch_stats(struct rtnl_link_stats64 *stats, 2409 struct hns3_enet_ring *ring, bool is_tx) 2410 { 2411 unsigned int start; 2412 2413 do { 2414 start = u64_stats_fetch_begin_irq(&ring->syncp); 2415 if (is_tx) { 2416 stats->tx_bytes += ring->stats.tx_bytes; 2417 stats->tx_packets += ring->stats.tx_pkts; 2418 stats->tx_dropped += ring->stats.sw_err_cnt; 2419 stats->tx_dropped += ring->stats.tx_vlan_err; 2420 stats->tx_dropped += ring->stats.tx_l4_proto_err; 2421 stats->tx_dropped += ring->stats.tx_l2l3l4_err; 2422 stats->tx_dropped += ring->stats.tx_tso_err; 2423 stats->tx_dropped += ring->stats.over_max_recursion; 2424 stats->tx_dropped += ring->stats.hw_limitation; 2425 stats->tx_dropped += ring->stats.copy_bits_err; 2426 stats->tx_dropped += ring->stats.skb2sgl_err; 2427 stats->tx_dropped += ring->stats.map_sg_err; 2428 stats->tx_errors += ring->stats.sw_err_cnt; 2429 stats->tx_errors += ring->stats.tx_vlan_err; 2430 stats->tx_errors += ring->stats.tx_l4_proto_err; 2431 stats->tx_errors += ring->stats.tx_l2l3l4_err; 2432 stats->tx_errors += ring->stats.tx_tso_err; 2433 stats->tx_errors += ring->stats.over_max_recursion; 2434 stats->tx_errors += ring->stats.hw_limitation; 2435 stats->tx_errors += ring->stats.copy_bits_err; 2436 stats->tx_errors += ring->stats.skb2sgl_err; 2437 stats->tx_errors += ring->stats.map_sg_err; 2438 } else { 2439 stats->rx_bytes += ring->stats.rx_bytes; 2440 stats->rx_packets += ring->stats.rx_pkts; 2441 stats->rx_dropped += ring->stats.l2_err; 2442 stats->rx_errors += ring->stats.l2_err; 2443 stats->rx_errors += ring->stats.l3l4_csum_err; 2444 stats->rx_crc_errors += ring->stats.l2_err; 2445 stats->multicast += ring->stats.rx_multicast; 2446 stats->rx_length_errors += ring->stats.err_pkt_len; 2447 } 2448 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 2449 } 2450 2451 static void hns3_nic_get_stats64(struct net_device *netdev, 2452 struct rtnl_link_stats64 *stats) 2453 { 2454 struct hns3_nic_priv *priv = netdev_priv(netdev); 2455 int queue_num = priv->ae_handle->kinfo.num_tqps; 2456 struct hnae3_handle *handle = priv->ae_handle; 2457 struct rtnl_link_stats64 ring_total_stats; 2458 
struct hns3_enet_ring *ring; 2459 unsigned int idx; 2460 2461 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 2462 return; 2463 2464 handle->ae_algo->ops->update_stats(handle, &netdev->stats); 2465 2466 memset(&ring_total_stats, 0, sizeof(ring_total_stats)); 2467 for (idx = 0; idx < queue_num; idx++) { 2468 /* fetch the tx stats */ 2469 ring = &priv->ring[idx]; 2470 hns3_fetch_stats(&ring_total_stats, ring, true); 2471 2472 /* fetch the rx stats */ 2473 ring = &priv->ring[idx + queue_num]; 2474 hns3_fetch_stats(&ring_total_stats, ring, false); 2475 } 2476 2477 stats->tx_bytes = ring_total_stats.tx_bytes; 2478 stats->tx_packets = ring_total_stats.tx_packets; 2479 stats->rx_bytes = ring_total_stats.rx_bytes; 2480 stats->rx_packets = ring_total_stats.rx_packets; 2481 2482 stats->rx_errors = ring_total_stats.rx_errors; 2483 stats->multicast = ring_total_stats.multicast; 2484 stats->rx_length_errors = ring_total_stats.rx_length_errors; 2485 stats->rx_crc_errors = ring_total_stats.rx_crc_errors; 2486 stats->rx_missed_errors = netdev->stats.rx_missed_errors; 2487 2488 stats->tx_errors = ring_total_stats.tx_errors; 2489 stats->rx_dropped = ring_total_stats.rx_dropped; 2490 stats->tx_dropped = ring_total_stats.tx_dropped; 2491 stats->collisions = netdev->stats.collisions; 2492 stats->rx_over_errors = netdev->stats.rx_over_errors; 2493 stats->rx_frame_errors = netdev->stats.rx_frame_errors; 2494 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; 2495 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; 2496 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; 2497 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; 2498 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; 2499 stats->tx_window_errors = netdev->stats.tx_window_errors; 2500 stats->rx_compressed = netdev->stats.rx_compressed; 2501 stats->tx_compressed = netdev->stats.tx_compressed; 2502 } 2503 2504 static int hns3_setup_tc(struct net_device *netdev, void *type_data) 2505 { 2506 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 2507 struct hnae3_knic_private_info *kinfo; 2508 u8 tc = mqprio_qopt->qopt.num_tc; 2509 u16 mode = mqprio_qopt->mode; 2510 u8 hw = mqprio_qopt->qopt.hw; 2511 struct hnae3_handle *h; 2512 2513 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && 2514 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) 2515 return -EOPNOTSUPP; 2516 2517 if (tc > HNAE3_MAX_TC) 2518 return -EINVAL; 2519 2520 if (!netdev) 2521 return -EINVAL; 2522 2523 h = hns3_get_handle(netdev); 2524 kinfo = &h->kinfo; 2525 2526 netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); 2527 2528 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? 
2529 kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP; 2530 } 2531 2532 static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv, 2533 struct flow_cls_offload *flow) 2534 { 2535 int tc = tc_classid_to_hwtc(priv->netdev, flow->classid); 2536 struct hnae3_handle *h = hns3_get_handle(priv->netdev); 2537 2538 switch (flow->command) { 2539 case FLOW_CLS_REPLACE: 2540 if (h->ae_algo->ops->add_cls_flower) 2541 return h->ae_algo->ops->add_cls_flower(h, flow, tc); 2542 break; 2543 case FLOW_CLS_DESTROY: 2544 if (h->ae_algo->ops->del_cls_flower) 2545 return h->ae_algo->ops->del_cls_flower(h, flow); 2546 break; 2547 default: 2548 break; 2549 } 2550 2551 return -EOPNOTSUPP; 2552 } 2553 2554 static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 2555 void *cb_priv) 2556 { 2557 struct hns3_nic_priv *priv = cb_priv; 2558 2559 if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) 2560 return -EOPNOTSUPP; 2561 2562 switch (type) { 2563 case TC_SETUP_CLSFLOWER: 2564 return hns3_setup_tc_cls_flower(priv, type_data); 2565 default: 2566 return -EOPNOTSUPP; 2567 } 2568 } 2569 2570 static LIST_HEAD(hns3_block_cb_list); 2571 2572 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, 2573 void *type_data) 2574 { 2575 struct hns3_nic_priv *priv = netdev_priv(dev); 2576 int ret; 2577 2578 switch (type) { 2579 case TC_SETUP_QDISC_MQPRIO: 2580 ret = hns3_setup_tc(dev, type_data); 2581 break; 2582 case TC_SETUP_BLOCK: 2583 ret = flow_block_cb_setup_simple(type_data, 2584 &hns3_block_cb_list, 2585 hns3_setup_tc_block_cb, 2586 priv, priv, true); 2587 break; 2588 default: 2589 return -EOPNOTSUPP; 2590 } 2591 2592 return ret; 2593 } 2594 2595 static int hns3_vlan_rx_add_vid(struct net_device *netdev, 2596 __be16 proto, u16 vid) 2597 { 2598 struct hnae3_handle *h = hns3_get_handle(netdev); 2599 int ret = -EIO; 2600 2601 if (h->ae_algo->ops->set_vlan_filter) 2602 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); 2603 2604 return ret; 2605 } 2606 2607 static int hns3_vlan_rx_kill_vid(struct net_device *netdev, 2608 __be16 proto, u16 vid) 2609 { 2610 struct hnae3_handle *h = hns3_get_handle(netdev); 2611 int ret = -EIO; 2612 2613 if (h->ae_algo->ops->set_vlan_filter) 2614 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); 2615 2616 return ret; 2617 } 2618 2619 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 2620 u8 qos, __be16 vlan_proto) 2621 { 2622 struct hnae3_handle *h = hns3_get_handle(netdev); 2623 int ret = -EIO; 2624 2625 netif_dbg(h, drv, netdev, 2626 "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n", 2627 vf, vlan, qos, ntohs(vlan_proto)); 2628 2629 if (h->ae_algo->ops->set_vf_vlan_filter) 2630 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, 2631 qos, vlan_proto); 2632 2633 return ret; 2634 } 2635 2636 static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) 2637 { 2638 struct hnae3_handle *handle = hns3_get_handle(netdev); 2639 2640 if (hns3_nic_resetting(netdev)) 2641 return -EBUSY; 2642 2643 if (!handle->ae_algo->ops->set_vf_spoofchk) 2644 return -EOPNOTSUPP; 2645 2646 return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable); 2647 } 2648 2649 static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable) 2650 { 2651 struct hnae3_handle *handle = hns3_get_handle(netdev); 2652 2653 if (!handle->ae_algo->ops->set_vf_trust) 2654 return -EOPNOTSUPP; 2655 2656 return handle->ae_algo->ops->set_vf_trust(handle, vf, enable); 2657 } 2658 2659 static int 
hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) 2660 { 2661 struct hnae3_handle *h = hns3_get_handle(netdev); 2662 int ret; 2663 2664 if (hns3_nic_resetting(netdev)) 2665 return -EBUSY; 2666 2667 if (!h->ae_algo->ops->set_mtu) 2668 return -EOPNOTSUPP; 2669 2670 netif_dbg(h, drv, netdev, 2671 "change mtu from %u to %d\n", netdev->mtu, new_mtu); 2672 2673 ret = h->ae_algo->ops->set_mtu(h, new_mtu); 2674 if (ret) 2675 netdev_err(netdev, "failed to change MTU in hardware %d\n", 2676 ret); 2677 else 2678 netdev->mtu = new_mtu; 2679 2680 return ret; 2681 } 2682 2683 static int hns3_get_timeout_queue(struct net_device *ndev) 2684 { 2685 int i; 2686 2687 /* Find the stopped queue the same way the stack does */ 2688 for (i = 0; i < ndev->num_tx_queues; i++) { 2689 struct netdev_queue *q; 2690 unsigned long trans_start; 2691 2692 q = netdev_get_tx_queue(ndev, i); 2693 trans_start = READ_ONCE(q->trans_start); 2694 if (netif_xmit_stopped(q) && 2695 time_after(jiffies, 2696 (trans_start + ndev->watchdog_timeo))) { 2697 #ifdef CONFIG_BQL 2698 struct dql *dql = &q->dql; 2699 2700 netdev_info(ndev, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n", 2701 dql->last_obj_cnt, dql->num_queued, 2702 dql->adj_limit, dql->num_completed); 2703 #endif 2704 netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n", 2705 q->state, 2706 jiffies_to_msecs(jiffies - trans_start)); 2707 break; 2708 } 2709 } 2710 2711 return i; 2712 } 2713 2714 static void hns3_dump_queue_stats(struct net_device *ndev, 2715 struct hns3_enet_ring *tx_ring, 2716 int timeout_queue) 2717 { 2718 struct napi_struct *napi = &tx_ring->tqp_vector->napi; 2719 struct hns3_nic_priv *priv = netdev_priv(ndev); 2720 2721 netdev_info(ndev, 2722 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", 2723 priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, 2724 tx_ring->next_to_clean, napi->state); 2725 2726 netdev_info(ndev, 2727 "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n", 2728 tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, 2729 tx_ring->stats.sw_err_cnt, tx_ring->pending_buf); 2730 2731 netdev_info(ndev, 2732 "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n", 2733 tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, 2734 tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); 2735 } 2736 2737 static void hns3_dump_queue_reg(struct net_device *ndev, 2738 struct hns3_enet_ring *tx_ring) 2739 { 2740 netdev_info(ndev, 2741 "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", 2742 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_NUM_REG), 2743 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_HEAD_REG), 2744 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TAIL_REG), 2745 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_ERR_REG), 2746 readl(tx_ring->tqp_vector->mask_addr)); 2747 netdev_info(ndev, 2748 "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", 2749 hns3_tqp_read_reg(tx_ring, HNS3_RING_EN_REG), 2750 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TC_REG), 2751 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_FBDNUM_REG), 2752 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_OFFSET_REG), 2753 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_EBDNUM_REG), 2754 hns3_tqp_read_reg(tx_ring, 2755 HNS3_RING_TX_RING_EBD_OFFSET_REG)); 2756 } 2757 2758 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) 2759 { 2760 struct hns3_nic_priv *priv = netdev_priv(ndev); 2761 struct hnae3_handle *h = 
hns3_get_handle(ndev); 2762 struct hns3_enet_ring *tx_ring; 2763 int timeout_queue; 2764 2765 timeout_queue = hns3_get_timeout_queue(ndev); 2766 if (timeout_queue >= ndev->num_tx_queues) { 2767 netdev_info(ndev, 2768 "no netdev TX timeout queue found, timeout count: %llu\n", 2769 priv->tx_timeout_count); 2770 return false; 2771 } 2772 2773 priv->tx_timeout_count++; 2774 2775 tx_ring = &priv->ring[timeout_queue]; 2776 hns3_dump_queue_stats(ndev, tx_ring, timeout_queue); 2777 2778 /* When mac received many pause frames continuous, it's unable to send 2779 * packets, which may cause tx timeout 2780 */ 2781 if (h->ae_algo->ops->get_mac_stats) { 2782 struct hns3_mac_stats mac_stats; 2783 2784 h->ae_algo->ops->get_mac_stats(h, &mac_stats); 2785 netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n", 2786 mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt); 2787 } 2788 2789 hns3_dump_queue_reg(ndev, tx_ring); 2790 2791 return true; 2792 } 2793 2794 static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue) 2795 { 2796 struct hns3_nic_priv *priv = netdev_priv(ndev); 2797 struct hnae3_handle *h = priv->ae_handle; 2798 2799 if (!hns3_get_tx_timeo_queue_info(ndev)) 2800 return; 2801 2802 /* request the reset, and let the hclge to determine 2803 * which reset level should be done 2804 */ 2805 if (h->ae_algo->ops->reset_event) 2806 h->ae_algo->ops->reset_event(h->pdev, h); 2807 } 2808 2809 #ifdef CONFIG_RFS_ACCEL 2810 static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 2811 u16 rxq_index, u32 flow_id) 2812 { 2813 struct hnae3_handle *h = hns3_get_handle(dev); 2814 struct flow_keys fkeys; 2815 2816 if (!h->ae_algo->ops->add_arfs_entry) 2817 return -EOPNOTSUPP; 2818 2819 if (skb->encapsulation) 2820 return -EPROTONOSUPPORT; 2821 2822 if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0)) 2823 return -EPROTONOSUPPORT; 2824 2825 if ((fkeys.basic.n_proto != htons(ETH_P_IP) && 2826 fkeys.basic.n_proto != htons(ETH_P_IPV6)) || 2827 (fkeys.basic.ip_proto != IPPROTO_TCP && 2828 fkeys.basic.ip_proto != IPPROTO_UDP)) 2829 return -EPROTONOSUPPORT; 2830 2831 return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); 2832 } 2833 #endif 2834 2835 static int hns3_nic_get_vf_config(struct net_device *ndev, int vf, 2836 struct ifla_vf_info *ivf) 2837 { 2838 struct hnae3_handle *h = hns3_get_handle(ndev); 2839 2840 if (!h->ae_algo->ops->get_vf_config) 2841 return -EOPNOTSUPP; 2842 2843 return h->ae_algo->ops->get_vf_config(h, vf, ivf); 2844 } 2845 2846 static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf, 2847 int link_state) 2848 { 2849 struct hnae3_handle *h = hns3_get_handle(ndev); 2850 2851 if (!h->ae_algo->ops->set_vf_link_state) 2852 return -EOPNOTSUPP; 2853 2854 return h->ae_algo->ops->set_vf_link_state(h, vf, link_state); 2855 } 2856 2857 static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf, 2858 int min_tx_rate, int max_tx_rate) 2859 { 2860 struct hnae3_handle *h = hns3_get_handle(ndev); 2861 2862 if (!h->ae_algo->ops->set_vf_rate) 2863 return -EOPNOTSUPP; 2864 2865 return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate, 2866 false); 2867 } 2868 2869 static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 2870 { 2871 struct hnae3_handle *h = hns3_get_handle(netdev); 2872 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 2873 2874 if (!h->ae_algo->ops->set_vf_mac) 2875 return -EOPNOTSUPP; 2876 2877 if (is_multicast_ether_addr(mac)) { 2878 hnae3_format_mac_addr(format_mac_addr, mac); 2879 
netdev_err(netdev, 2880 "Invalid MAC:%s specified. Could not set MAC\n", 2881 format_mac_addr); 2882 return -EINVAL; 2883 } 2884 2885 return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); 2886 } 2887 2888 static const struct net_device_ops hns3_nic_netdev_ops = { 2889 .ndo_open = hns3_nic_net_open, 2890 .ndo_stop = hns3_nic_net_stop, 2891 .ndo_start_xmit = hns3_nic_net_xmit, 2892 .ndo_tx_timeout = hns3_nic_net_timeout, 2893 .ndo_set_mac_address = hns3_nic_net_set_mac_address, 2894 .ndo_eth_ioctl = hns3_nic_do_ioctl, 2895 .ndo_change_mtu = hns3_nic_change_mtu, 2896 .ndo_set_features = hns3_nic_set_features, 2897 .ndo_features_check = hns3_features_check, 2898 .ndo_get_stats64 = hns3_nic_get_stats64, 2899 .ndo_setup_tc = hns3_nic_setup_tc, 2900 .ndo_set_rx_mode = hns3_nic_set_rx_mode, 2901 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, 2902 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, 2903 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, 2904 .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk, 2905 .ndo_set_vf_trust = hns3_set_vf_trust, 2906 #ifdef CONFIG_RFS_ACCEL 2907 .ndo_rx_flow_steer = hns3_rx_flow_steer, 2908 #endif 2909 .ndo_get_vf_config = hns3_nic_get_vf_config, 2910 .ndo_set_vf_link_state = hns3_nic_set_vf_link_state, 2911 .ndo_set_vf_rate = hns3_nic_set_vf_rate, 2912 .ndo_set_vf_mac = hns3_nic_set_vf_mac, 2913 }; 2914 2915 bool hns3_is_phys_func(struct pci_dev *pdev) 2916 { 2917 u32 dev_id = pdev->device; 2918 2919 switch (dev_id) { 2920 case HNAE3_DEV_ID_GE: 2921 case HNAE3_DEV_ID_25GE: 2922 case HNAE3_DEV_ID_25GE_RDMA: 2923 case HNAE3_DEV_ID_25GE_RDMA_MACSEC: 2924 case HNAE3_DEV_ID_50GE_RDMA: 2925 case HNAE3_DEV_ID_50GE_RDMA_MACSEC: 2926 case HNAE3_DEV_ID_100G_RDMA_MACSEC: 2927 case HNAE3_DEV_ID_200G_RDMA: 2928 return true; 2929 case HNAE3_DEV_ID_VF: 2930 case HNAE3_DEV_ID_RDMA_DCB_PFC_VF: 2931 return false; 2932 default: 2933 dev_warn(&pdev->dev, "un-recognized pci device-id %u", 2934 dev_id); 2935 } 2936 2937 return false; 2938 } 2939 2940 static void hns3_disable_sriov(struct pci_dev *pdev) 2941 { 2942 /* If our VFs are assigned we cannot shut down SR-IOV 2943 * without causing issues, so just leave the hardware 2944 * available but disabled 2945 */ 2946 if (pci_vfs_assigned(pdev)) { 2947 dev_warn(&pdev->dev, 2948 "disabling driver while VFs are assigned\n"); 2949 return; 2950 } 2951 2952 pci_disable_sriov(pdev); 2953 } 2954 2955 /* hns3_probe - Device initialization routine 2956 * @pdev: PCI device information struct 2957 * @ent: entry in hns3_pci_tbl 2958 * 2959 * hns3_probe initializes a PF identified by a pci_dev structure. 2960 * The OS initialization, configuring of the PF private structure, 2961 * and a hardware reset occur. 
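 * Most of that work is driven from hnae3_register_ae_dev(), which hands
 * the new ae_dev over to the matching hardware-specific ae_algo driver
 * and to registered hnae3 clients such as this module.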
2962 * 2963 * Returns 0 on success, negative on failure 2964 */ 2965 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2966 { 2967 struct hnae3_ae_dev *ae_dev; 2968 int ret; 2969 2970 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL); 2971 if (!ae_dev) 2972 return -ENOMEM; 2973 2974 ae_dev->pdev = pdev; 2975 ae_dev->flag = ent->driver_data; 2976 pci_set_drvdata(pdev, ae_dev); 2977 2978 ret = hnae3_register_ae_dev(ae_dev); 2979 if (ret) 2980 pci_set_drvdata(pdev, NULL); 2981 2982 return ret; 2983 } 2984 2985 /* hns3_remove - Device removal routine 2986 * @pdev: PCI device information struct 2987 */ 2988 static void hns3_remove(struct pci_dev *pdev) 2989 { 2990 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2991 2992 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) 2993 hns3_disable_sriov(pdev); 2994 2995 hnae3_unregister_ae_dev(ae_dev); 2996 pci_set_drvdata(pdev, NULL); 2997 } 2998 2999 /** 3000 * hns3_pci_sriov_configure 3001 * @pdev: pointer to a pci_dev structure 3002 * @num_vfs: number of VFs to allocate 3003 * 3004 * Enable or change the number of VFs. Called when the user updates the number 3005 * of VFs in sysfs. 3006 **/ 3007 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 3008 { 3009 int ret; 3010 3011 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) { 3012 dev_warn(&pdev->dev, "Can not config SRIOV\n"); 3013 return -EINVAL; 3014 } 3015 3016 if (num_vfs) { 3017 ret = pci_enable_sriov(pdev, num_vfs); 3018 if (ret) 3019 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); 3020 else 3021 return num_vfs; 3022 } else if (!pci_vfs_assigned(pdev)) { 3023 pci_disable_sriov(pdev); 3024 } else { 3025 dev_warn(&pdev->dev, 3026 "Unable to free VFs because some are assigned to VMs.\n"); 3027 } 3028 3029 return 0; 3030 } 3031 3032 static void hns3_shutdown(struct pci_dev *pdev) 3033 { 3034 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 3035 3036 hnae3_unregister_ae_dev(ae_dev); 3037 pci_set_drvdata(pdev, NULL); 3038 3039 if (system_state == SYSTEM_POWER_OFF) 3040 pci_set_power_state(pdev, PCI_D3hot); 3041 } 3042 3043 static int __maybe_unused hns3_suspend(struct device *dev) 3044 { 3045 struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev); 3046 3047 if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { 3048 dev_info(dev, "Begin to suspend.\n"); 3049 if (ae_dev->ops && ae_dev->ops->reset_prepare) 3050 ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET); 3051 } 3052 3053 return 0; 3054 } 3055 3056 static int __maybe_unused hns3_resume(struct device *dev) 3057 { 3058 struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev); 3059 3060 if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { 3061 dev_info(dev, "Begin to resume.\n"); 3062 if (ae_dev->ops && ae_dev->ops->reset_done) 3063 ae_dev->ops->reset_done(ae_dev); 3064 } 3065 3066 return 0; 3067 } 3068 3069 static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, 3070 pci_channel_state_t state) 3071 { 3072 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 3073 pci_ers_result_t ret; 3074 3075 dev_info(&pdev->dev, "PCI error detected, state(=%u)!!\n", state); 3076 3077 if (state == pci_channel_io_perm_failure) 3078 return PCI_ERS_RESULT_DISCONNECT; 3079 3080 if (!ae_dev || !ae_dev->ops) { 3081 dev_err(&pdev->dev, 3082 "Can't recover - error happened before device initialized\n"); 3083 return PCI_ERS_RESULT_NONE; 3084 } 3085 3086 if (ae_dev->ops->handle_hw_ras_error) 3087 ret = ae_dev->ops->handle_hw_ras_error(ae_dev); 3088 else 3089 return 
PCI_ERS_RESULT_NONE; 3090 3091 return ret; 3092 } 3093 3094 static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) 3095 { 3096 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 3097 const struct hnae3_ae_ops *ops; 3098 enum hnae3_reset_type reset_type; 3099 struct device *dev = &pdev->dev; 3100 3101 if (!ae_dev || !ae_dev->ops) 3102 return PCI_ERS_RESULT_NONE; 3103 3104 ops = ae_dev->ops; 3105 /* request the reset */ 3106 if (ops->reset_event && ops->get_reset_level && 3107 ops->set_default_reset_request) { 3108 if (ae_dev->hw_err_reset_req) { 3109 reset_type = ops->get_reset_level(ae_dev, 3110 &ae_dev->hw_err_reset_req); 3111 ops->set_default_reset_request(ae_dev, reset_type); 3112 dev_info(dev, "requesting reset due to PCI error\n"); 3113 ops->reset_event(pdev, NULL); 3114 } 3115 3116 return PCI_ERS_RESULT_RECOVERED; 3117 } 3118 3119 return PCI_ERS_RESULT_DISCONNECT; 3120 } 3121 3122 static void hns3_reset_prepare(struct pci_dev *pdev) 3123 { 3124 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 3125 3126 dev_info(&pdev->dev, "FLR prepare\n"); 3127 if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare) 3128 ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET); 3129 } 3130 3131 static void hns3_reset_done(struct pci_dev *pdev) 3132 { 3133 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 3134 3135 dev_info(&pdev->dev, "FLR done\n"); 3136 if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done) 3137 ae_dev->ops->reset_done(ae_dev); 3138 } 3139 3140 static const struct pci_error_handlers hns3_err_handler = { 3141 .error_detected = hns3_error_detected, 3142 .slot_reset = hns3_slot_reset, 3143 .reset_prepare = hns3_reset_prepare, 3144 .reset_done = hns3_reset_done, 3145 }; 3146 3147 static SIMPLE_DEV_PM_OPS(hns3_pm_ops, hns3_suspend, hns3_resume); 3148 3149 static struct pci_driver hns3_driver = { 3150 .name = hns3_driver_name, 3151 .id_table = hns3_pci_tbl, 3152 .probe = hns3_probe, 3153 .remove = hns3_remove, 3154 .shutdown = hns3_shutdown, 3155 .driver.pm = &hns3_pm_ops, 3156 .sriov_configure = hns3_pci_sriov_configure, 3157 .err_handler = &hns3_err_handler, 3158 }; 3159 3160 /* set default feature to hns3 */ 3161 static void hns3_set_default_feature(struct net_device *netdev) 3162 { 3163 struct hnae3_handle *h = hns3_get_handle(netdev); 3164 struct pci_dev *pdev = h->pdev; 3165 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 3166 3167 netdev->priv_flags |= IFF_UNICAST_FLT; 3168 3169 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 3170 3171 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | 3172 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 3173 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 3174 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 3175 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 3176 NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; 3177 3178 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 3179 netdev->features |= NETIF_F_GRO_HW; 3180 3181 if (!(h->flags & HNAE3_SUPPORT_VF)) 3182 netdev->features |= NETIF_F_NTUPLE; 3183 } 3184 3185 if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) 3186 netdev->features |= NETIF_F_GSO_UDP_L4; 3187 3188 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) 3189 netdev->features |= NETIF_F_HW_CSUM; 3190 else 3191 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3192 3193 if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) 3194 netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 3195 3196 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) 3197 netdev->features |= 
NETIF_F_HW_TC; 3198 3199 netdev->hw_features |= netdev->features; 3200 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) 3201 netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 3202 3203 netdev->vlan_features |= netdev->features & 3204 ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX | 3205 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_GRO_HW | NETIF_F_NTUPLE | 3206 NETIF_F_HW_TC); 3207 3208 netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID; 3209 } 3210 3211 static int hns3_alloc_buffer(struct hns3_enet_ring *ring, 3212 struct hns3_desc_cb *cb) 3213 { 3214 unsigned int order = hns3_page_order(ring); 3215 struct page *p; 3216 3217 if (ring->page_pool) { 3218 p = page_pool_dev_alloc_frag(ring->page_pool, 3219 &cb->page_offset, 3220 hns3_buf_size(ring)); 3221 if (unlikely(!p)) 3222 return -ENOMEM; 3223 3224 cb->priv = p; 3225 cb->buf = page_address(p); 3226 cb->dma = page_pool_get_dma_addr(p); 3227 cb->type = DESC_TYPE_PP_FRAG; 3228 cb->reuse_flag = 0; 3229 return 0; 3230 } 3231 3232 p = dev_alloc_pages(order); 3233 if (!p) 3234 return -ENOMEM; 3235 3236 cb->priv = p; 3237 cb->page_offset = 0; 3238 cb->reuse_flag = 0; 3239 cb->buf = page_address(p); 3240 cb->length = hns3_page_size(ring); 3241 cb->type = DESC_TYPE_PAGE; 3242 page_ref_add(p, USHRT_MAX - 1); 3243 cb->pagecnt_bias = USHRT_MAX; 3244 3245 return 0; 3246 } 3247 3248 static void hns3_free_buffer(struct hns3_enet_ring *ring, 3249 struct hns3_desc_cb *cb, int budget) 3250 { 3251 if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD | 3252 DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB)) 3253 napi_consume_skb(cb->priv, budget); 3254 else if (!HNAE3_IS_TX_RING(ring)) { 3255 if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias) 3256 __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); 3257 else if (cb->type & DESC_TYPE_PP_FRAG) 3258 page_pool_put_full_page(ring->page_pool, cb->priv, 3259 false); 3260 } 3261 memset(cb, 0, sizeof(*cb)); 3262 } 3263 3264 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) 3265 { 3266 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, 3267 cb->length, ring_to_dma_dir(ring)); 3268 3269 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) 3270 return -EIO; 3271 3272 return 0; 3273 } 3274 3275 static void hns3_unmap_buffer(struct hns3_enet_ring *ring, 3276 struct hns3_desc_cb *cb) 3277 { 3278 if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) 3279 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, 3280 ring_to_dma_dir(ring)); 3281 else if ((cb->type & DESC_TYPE_PAGE) && cb->length) 3282 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, 3283 ring_to_dma_dir(ring)); 3284 else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD | 3285 DESC_TYPE_SGL_SKB)) 3286 hns3_tx_spare_reclaim_cb(ring, cb); 3287 } 3288 3289 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) 3290 { 3291 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 3292 ring->desc[i].addr = 0; 3293 ring->desc_cb[i].refill = 0; 3294 } 3295 3296 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i, 3297 int budget) 3298 { 3299 struct hns3_desc_cb *cb = &ring->desc_cb[i]; 3300 3301 if (!ring->desc_cb[i].dma) 3302 return; 3303 3304 hns3_buffer_detach(ring, i); 3305 hns3_free_buffer(ring, cb, budget); 3306 } 3307 3308 static void hns3_free_buffers(struct hns3_enet_ring *ring) 3309 { 3310 int i; 3311 3312 for (i = 0; i < ring->desc_num; i++) 3313 hns3_free_buffer_detach(ring, i, 0); 3314 } 3315 3316 /* free desc along with its attached buffer */ 
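/* (the buffers attached to the descriptors are unmapped and released first,
 * then the DMA-coherent descriptor array itself is freed)
 */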
3317 static void hns3_free_desc(struct hns3_enet_ring *ring)
3318 {
3319 	int size = ring->desc_num * sizeof(ring->desc[0]);
3320 
3321 	hns3_free_buffers(ring);
3322 
3323 	if (ring->desc) {
3324 		dma_free_coherent(ring_to_dev(ring), size,
3325 				  ring->desc, ring->desc_dma_addr);
3326 		ring->desc = NULL;
3327 	}
3328 }
3329 
3330 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
3331 {
3332 	int size = ring->desc_num * sizeof(ring->desc[0]);
3333 
3334 	ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
3335 					&ring->desc_dma_addr, GFP_KERNEL);
3336 	if (!ring->desc)
3337 		return -ENOMEM;
3338 
3339 	return 0;
3340 }
3341 
3342 static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
3343 				     struct hns3_desc_cb *cb)
3344 {
3345 	int ret;
3346 
3347 	ret = hns3_alloc_buffer(ring, cb);
3348 	if (ret || ring->page_pool)
3349 		goto out;
3350 
3351 	ret = hns3_map_buffer(ring, cb);
3352 	if (ret)
3353 		goto out_with_buf;
3354 
3355 	return 0;
3356 
3357 out_with_buf:
3358 	hns3_free_buffer(ring, cb, 0);
3359 out:
3360 	return ret;
3361 }
3362 
3363 static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
3364 {
3365 	int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]);
3366 
3367 	if (ret)
3368 		return ret;
3369 
3370 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
3371 					 ring->desc_cb[i].page_offset);
3372 	ring->desc_cb[i].refill = 1;
3373 
3374 	return 0;
3375 }
3376 
3377 /* Allocate memory for the raw packet buffers and map them for DMA */
3378 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
3379 {
3380 	int i, j, ret;
3381 
3382 	for (i = 0; i < ring->desc_num; i++) {
3383 		ret = hns3_alloc_and_attach_buffer(ring, i);
3384 		if (ret)
3385 			goto out_buffer_fail;
3386 	}
3387 
3388 	return 0;
3389 
3390 out_buffer_fail:
3391 	for (j = i - 1; j >= 0; j--)
3392 		hns3_free_buffer_detach(ring, j, 0);
3393 	return ret;
3394 }
3395 
3396 /* detach an in-use buffer and replace it with a reserved one */
3397 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
3398 				struct hns3_desc_cb *res_cb)
3399 {
3400 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
3401 	ring->desc_cb[i] = *res_cb;
3402 	ring->desc_cb[i].refill = 1;
3403 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
3404 					 ring->desc_cb[i].page_offset);
3405 	ring->desc[i].rx.bd_base_info = 0;
3406 }
3407 
3408 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
3409 {
3410 	ring->desc_cb[i].reuse_flag = 0;
3411 	ring->desc_cb[i].refill = 1;
3412 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
3413 					 ring->desc_cb[i].page_offset);
3414 	ring->desc[i].rx.bd_base_info = 0;
3415 
3416 	dma_sync_single_for_device(ring_to_dev(ring),
3417 			ring->desc_cb[i].dma + ring->desc_cb[i].page_offset,
3418 			hns3_buf_size(ring),
3419 			DMA_FROM_DEVICE);
3420 }
3421 
3422 static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
3423 				  int *bytes, int *pkts, int budget)
3424 {
3425 	/* Pairs with the ring->last_to_use update in hns3_tx_doorbell();
3426 	 * smp_store_release() is not used there because the doorbell
3427 	 * write already has the needed barrier.
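	 * (i.e. the MMIO doorbell write is assumed to provide the store
	 * ordering that smp_store_release() would otherwise add before
	 * last_to_use is updated)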
3428 */ 3429 int ltu = smp_load_acquire(&ring->last_to_use); 3430 int ntc = ring->next_to_clean; 3431 struct hns3_desc_cb *desc_cb; 3432 bool reclaimed = false; 3433 struct hns3_desc *desc; 3434 3435 while (ltu != ntc) { 3436 desc = &ring->desc[ntc]; 3437 3438 if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) & 3439 BIT(HNS3_TXD_VLD_B)) 3440 break; 3441 3442 desc_cb = &ring->desc_cb[ntc]; 3443 3444 if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_ALL | 3445 DESC_TYPE_BOUNCE_HEAD | 3446 DESC_TYPE_SGL_SKB)) { 3447 (*pkts)++; 3448 (*bytes) += desc_cb->send_bytes; 3449 } 3450 3451 /* desc_cb will be cleaned, after hnae3_free_buffer_detach */ 3452 hns3_free_buffer_detach(ring, ntc, budget); 3453 3454 if (++ntc == ring->desc_num) 3455 ntc = 0; 3456 3457 /* Issue prefetch for next Tx descriptor */ 3458 prefetch(&ring->desc_cb[ntc]); 3459 reclaimed = true; 3460 } 3461 3462 if (unlikely(!reclaimed)) 3463 return false; 3464 3465 /* This smp_store_release() pairs with smp_load_acquire() in 3466 * ring_space called by hns3_nic_net_xmit. 3467 */ 3468 smp_store_release(&ring->next_to_clean, ntc); 3469 3470 hns3_tx_spare_update(ring); 3471 3472 return true; 3473 } 3474 3475 void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) 3476 { 3477 struct net_device *netdev = ring_to_netdev(ring); 3478 struct hns3_nic_priv *priv = netdev_priv(netdev); 3479 struct netdev_queue *dev_queue; 3480 int bytes, pkts; 3481 3482 bytes = 0; 3483 pkts = 0; 3484 3485 if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget))) 3486 return; 3487 3488 ring->tqp_vector->tx_group.total_bytes += bytes; 3489 ring->tqp_vector->tx_group.total_packets += pkts; 3490 3491 u64_stats_update_begin(&ring->syncp); 3492 ring->stats.tx_bytes += bytes; 3493 ring->stats.tx_pkts += pkts; 3494 u64_stats_update_end(&ring->syncp); 3495 3496 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); 3497 netdev_tx_completed_queue(dev_queue, pkts, bytes); 3498 3499 if (unlikely(netif_carrier_ok(netdev) && 3500 ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) { 3501 /* Make sure that anybody stopping the queue after this 3502 * sees the new next_to_clean. 3503 */ 3504 smp_mb(); 3505 if (netif_tx_queue_stopped(dev_queue) && 3506 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 3507 netif_tx_wake_queue(dev_queue); 3508 ring->stats.restart_queue++; 3509 } 3510 } 3511 } 3512 3513 static int hns3_desc_unused(struct hns3_enet_ring *ring) 3514 { 3515 int ntc = ring->next_to_clean; 3516 int ntu = ring->next_to_use; 3517 3518 if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill)) 3519 return ring->desc_num; 3520 3521 return ((ntc >= ntu) ? 
0 : ring->desc_num) + ntc - ntu; 3522 } 3523 3524 /* Return true if there is any allocation failure */ 3525 static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, 3526 int cleand_count) 3527 { 3528 struct hns3_desc_cb *desc_cb; 3529 struct hns3_desc_cb res_cbs; 3530 int i, ret; 3531 3532 for (i = 0; i < cleand_count; i++) { 3533 desc_cb = &ring->desc_cb[ring->next_to_use]; 3534 if (desc_cb->reuse_flag) { 3535 hns3_ring_stats_update(ring, reuse_pg_cnt); 3536 3537 hns3_reuse_buffer(ring, ring->next_to_use); 3538 } else { 3539 ret = hns3_alloc_and_map_buffer(ring, &res_cbs); 3540 if (ret) { 3541 hns3_ring_stats_update(ring, sw_err_cnt); 3542 3543 hns3_rl_err(ring_to_netdev(ring), 3544 "alloc rx buffer failed: %d\n", 3545 ret); 3546 3547 writel(i, ring->tqp->io_base + 3548 HNS3_RING_RX_RING_HEAD_REG); 3549 return true; 3550 } 3551 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); 3552 3553 hns3_ring_stats_update(ring, non_reuse_pg); 3554 } 3555 3556 ring_ptr_move_fw(ring, next_to_use); 3557 } 3558 3559 writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); 3560 return false; 3561 } 3562 3563 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) 3564 { 3565 return page_count(cb->priv) == cb->pagecnt_bias; 3566 } 3567 3568 static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i, 3569 struct hns3_enet_ring *ring, 3570 int pull_len, 3571 struct hns3_desc_cb *desc_cb) 3572 { 3573 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; 3574 u32 frag_offset = desc_cb->page_offset + pull_len; 3575 int size = le16_to_cpu(desc->rx.size); 3576 u32 frag_size = size - pull_len; 3577 void *frag = napi_alloc_frag(frag_size); 3578 3579 if (unlikely(!frag)) { 3580 hns3_ring_stats_update(ring, frag_alloc_err); 3581 3582 hns3_rl_err(ring_to_netdev(ring), 3583 "failed to allocate rx frag\n"); 3584 return -ENOMEM; 3585 } 3586 3587 desc_cb->reuse_flag = 1; 3588 memcpy(frag, desc_cb->buf + frag_offset, frag_size); 3589 skb_add_rx_frag(skb, i, virt_to_page(frag), 3590 offset_in_page(frag), frag_size, frag_size); 3591 3592 hns3_ring_stats_update(ring, frag_alloc); 3593 return 0; 3594 } 3595 3596 static void hns3_nic_reuse_page(struct sk_buff *skb, int i, 3597 struct hns3_enet_ring *ring, int pull_len, 3598 struct hns3_desc_cb *desc_cb) 3599 { 3600 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; 3601 u32 frag_offset = desc_cb->page_offset + pull_len; 3602 int size = le16_to_cpu(desc->rx.size); 3603 u32 truesize = hns3_buf_size(ring); 3604 u32 frag_size = size - pull_len; 3605 int ret = 0; 3606 bool reused; 3607 3608 if (ring->page_pool) { 3609 skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, 3610 frag_size, truesize); 3611 return; 3612 } 3613 3614 /* Avoid re-using remote or pfmem page */ 3615 if (unlikely(!dev_page_is_reusable(desc_cb->priv))) 3616 goto out; 3617 3618 reused = hns3_can_reuse_page(desc_cb); 3619 3620 /* Rx page can be reused when: 3621 * 1. Rx page is only owned by the driver when page_offset 3622 * is zero, which means 0 @ truesize will be used by 3623 * stack after skb_add_rx_frag() is called, and the rest 3624 * of rx page can be reused by driver. 3625 * Or 3626 * 2. Rx page is only owned by the driver when page_offset 3627 * is non-zero, which means page_offset @ truesize will 3628 * be used by stack after skb_add_rx_frag() is called, 3629 * and 0 @ truesize can be reused by driver. 
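	 * As an illustration (sizes are made up, not taken from the
	 * hardware): with a 4K page and a 2K rx buffer, page_offset simply
	 * flips between 0 and 2K, and the page keeps being reused as long
	 * as the stack has already released the half it was handed via
	 * skb_add_rx_frag().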
3630 */ 3631 if ((!desc_cb->page_offset && reused) || 3632 ((desc_cb->page_offset + truesize + truesize) <= 3633 hns3_page_size(ring) && desc_cb->page_offset)) { 3634 desc_cb->page_offset += truesize; 3635 desc_cb->reuse_flag = 1; 3636 } else if (desc_cb->page_offset && reused) { 3637 desc_cb->page_offset = 0; 3638 desc_cb->reuse_flag = 1; 3639 } else if (frag_size <= ring->rx_copybreak) { 3640 ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb); 3641 if (ret) 3642 goto out; 3643 } 3644 3645 out: 3646 desc_cb->pagecnt_bias--; 3647 3648 if (unlikely(!desc_cb->pagecnt_bias)) { 3649 page_ref_add(desc_cb->priv, USHRT_MAX); 3650 desc_cb->pagecnt_bias = USHRT_MAX; 3651 } 3652 3653 skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, 3654 frag_size, truesize); 3655 3656 if (unlikely(!desc_cb->reuse_flag)) 3657 __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); 3658 } 3659 3660 static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) 3661 { 3662 __be16 type = skb->protocol; 3663 struct tcphdr *th; 3664 int depth = 0; 3665 3666 while (eth_type_vlan(type)) { 3667 struct vlan_hdr *vh; 3668 3669 if ((depth + VLAN_HLEN) > skb_headlen(skb)) 3670 return -EFAULT; 3671 3672 vh = (struct vlan_hdr *)(skb->data + depth); 3673 type = vh->h_vlan_encapsulated_proto; 3674 depth += VLAN_HLEN; 3675 } 3676 3677 skb_set_network_header(skb, depth); 3678 3679 if (type == htons(ETH_P_IP)) { 3680 const struct iphdr *iph = ip_hdr(skb); 3681 3682 depth += sizeof(struct iphdr); 3683 skb_set_transport_header(skb, depth); 3684 th = tcp_hdr(skb); 3685 th->check = ~tcp_v4_check(skb->len - depth, iph->saddr, 3686 iph->daddr, 0); 3687 } else if (type == htons(ETH_P_IPV6)) { 3688 const struct ipv6hdr *iph = ipv6_hdr(skb); 3689 3690 depth += sizeof(struct ipv6hdr); 3691 skb_set_transport_header(skb, depth); 3692 th = tcp_hdr(skb); 3693 th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, 3694 &iph->daddr, 0); 3695 } else { 3696 hns3_rl_err(skb->dev, 3697 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", 3698 be16_to_cpu(type), depth); 3699 return -EFAULT; 3700 } 3701 3702 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; 3703 if (th->cwr) 3704 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 3705 3706 if (l234info & BIT(HNS3_RXD_GRO_FIXID_B)) 3707 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; 3708 3709 skb->csum_start = (unsigned char *)th - skb->head; 3710 skb->csum_offset = offsetof(struct tcphdr, check); 3711 skb->ip_summed = CHECKSUM_PARTIAL; 3712 3713 trace_hns3_gro(skb); 3714 3715 return 0; 3716 } 3717 3718 static bool hns3_checksum_complete(struct hns3_enet_ring *ring, 3719 struct sk_buff *skb, u32 ptype, u16 csum) 3720 { 3721 if (ptype == HNS3_INVALID_PTYPE || 3722 hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE) 3723 return false; 3724 3725 hns3_ring_stats_update(ring, csum_complete); 3726 skb->ip_summed = CHECKSUM_COMPLETE; 3727 skb->csum = csum_unfold((__force __sum16)csum); 3728 3729 return true; 3730 } 3731 3732 static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info, 3733 u32 ol_info, u32 ptype) 3734 { 3735 int l3_type, l4_type; 3736 int ol4_type; 3737 3738 if (ptype != HNS3_INVALID_PTYPE) { 3739 skb->csum_level = hns3_rx_ptype_tbl[ptype].csum_level; 3740 skb->ip_summed = hns3_rx_ptype_tbl[ptype].ip_summed; 3741 3742 return; 3743 } 3744 3745 ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M, 3746 HNS3_RXD_OL4ID_S); 3747 switch (ol4_type) { 3748 case HNS3_OL4_TYPE_MAC_IN_UDP: 3749 case HNS3_OL4_TYPE_NVGRE: 3750 skb->csum_level = 1; 3751 
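		/* For tunnel packets (VXLAN/GENEVE-style MAC-in-UDP or
		 * NVGRE), csum_level 1 together with CHECKSUM_UNNECESSARY
		 * below tells the stack that the checksum one encapsulation
		 * level down (the inner packet) has been verified as well.
		 */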
fallthrough;
3752 	case HNS3_OL4_TYPE_NO_TUN:
3753 		l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
3754 					  HNS3_RXD_L3ID_S);
3755 		l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
3756 					  HNS3_RXD_L4ID_S);
3757 		/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
3758 		if ((l3_type == HNS3_L3_TYPE_IPV4 ||
3759 		     l3_type == HNS3_L3_TYPE_IPV6) &&
3760 		    (l4_type == HNS3_L4_TYPE_UDP ||
3761 		     l4_type == HNS3_L4_TYPE_TCP ||
3762 		     l4_type == HNS3_L4_TYPE_SCTP))
3763 			skb->ip_summed = CHECKSUM_UNNECESSARY;
3764 		break;
3765 	default:
3766 		break;
3767 	}
3768 }
3769 
3770 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
3771 			     u32 l234info, u32 bd_base_info, u32 ol_info,
3772 			     u16 csum)
3773 {
3774 	struct net_device *netdev = ring_to_netdev(ring);
3775 	struct hns3_nic_priv *priv = netdev_priv(netdev);
3776 	u32 ptype = HNS3_INVALID_PTYPE;
3777 
3778 	skb->ip_summed = CHECKSUM_NONE;
3779 
3780 	skb_checksum_none_assert(skb);
3781 
3782 	if (!(netdev->features & NETIF_F_RXCSUM))
3783 		return;
3784 
3785 	if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state))
3786 		ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
3787 					HNS3_RXD_PTYPE_S);
3788 
3789 	if (hns3_checksum_complete(ring, skb, ptype, csum))
3790 		return;
3791 
3792 	/* check if hardware has done checksum */
3793 	if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
3794 		return;
3795 
3796 	if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
3797 				 BIT(HNS3_RXD_OL3E_B) |
3798 				 BIT(HNS3_RXD_OL4E_B)))) {
3799 		hns3_ring_stats_update(ring, l3l4_csum_err);
3800 
3801 		return;
3802 	}
3803 
3804 	hns3_rx_handle_csum(skb, l234info, ol_info, ptype);
3805 }
3806 
3807 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
3808 {
3809 	if (skb_has_frag_list(skb))
3810 		napi_gro_flush(&ring->tqp_vector->napi, false);
3811 
3812 	napi_gro_receive(&ring->tqp_vector->napi, skb);
3813 }
3814 
3815 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
3816 				struct hns3_desc *desc, u32 l234info,
3817 				u16 *vlan_tag)
3818 {
3819 	struct hnae3_handle *handle = ring->tqp->handle;
3820 	struct pci_dev *pdev = ring->tqp->handle->pdev;
3821 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3822 
3823 	if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) {
3824 		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
3825 		if (!(*vlan_tag & VLAN_VID_MASK))
3826 			*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
3827 
3828 		return (*vlan_tag != 0);
3829 	}
3830 
3831 #define HNS3_STRP_OUTER_VLAN	0x1
3832 #define HNS3_STRP_INNER_VLAN	0x2
3833 #define HNS3_STRP_BOTH		0x3
3834 
3835 	/* Hardware always inserts the stripped VLAN tag into the RX
3836 	 * descriptor when it removes the tag from the packet, so the driver
3837 	 * needs to decide which tag to report to the stack.
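	 * (if a port based VLAN is configured, the stripped outer tag is
	 * the port VLAN that hardware inserted and is therefore never
	 * reported to the stack)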
3838 */ 3839 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, 3840 HNS3_RXD_STRP_TAGP_S)) { 3841 case HNS3_STRP_OUTER_VLAN: 3842 if (handle->port_base_vlan_state != 3843 HNAE3_PORT_BASE_VLAN_DISABLE) 3844 return false; 3845 3846 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 3847 return true; 3848 case HNS3_STRP_INNER_VLAN: 3849 if (handle->port_base_vlan_state != 3850 HNAE3_PORT_BASE_VLAN_DISABLE) 3851 return false; 3852 3853 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 3854 return true; 3855 case HNS3_STRP_BOTH: 3856 if (handle->port_base_vlan_state == 3857 HNAE3_PORT_BASE_VLAN_DISABLE) 3858 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 3859 else 3860 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 3861 3862 return true; 3863 default: 3864 return false; 3865 } 3866 } 3867 3868 static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring) 3869 { 3870 ring->desc[ring->next_to_clean].rx.bd_base_info &= 3871 cpu_to_le32(~BIT(HNS3_RXD_VLD_B)); 3872 ring->desc_cb[ring->next_to_clean].refill = 0; 3873 ring->next_to_clean += 1; 3874 3875 if (unlikely(ring->next_to_clean == ring->desc_num)) 3876 ring->next_to_clean = 0; 3877 } 3878 3879 static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, 3880 unsigned char *va) 3881 { 3882 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; 3883 struct net_device *netdev = ring_to_netdev(ring); 3884 struct sk_buff *skb; 3885 3886 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); 3887 skb = ring->skb; 3888 if (unlikely(!skb)) { 3889 hns3_rl_err(netdev, "alloc rx skb fail\n"); 3890 hns3_ring_stats_update(ring, sw_err_cnt); 3891 3892 return -ENOMEM; 3893 } 3894 3895 trace_hns3_rx_desc(ring); 3896 prefetchw(skb->data); 3897 3898 ring->pending_buf = 1; 3899 ring->frag_num = 0; 3900 ring->tail_skb = NULL; 3901 if (length <= HNS3_RX_HEAD_SIZE) { 3902 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); 3903 3904 /* We can reuse buffer as-is, just make sure it is reusable */ 3905 if (dev_page_is_reusable(desc_cb->priv)) 3906 desc_cb->reuse_flag = 1; 3907 else if (desc_cb->type & DESC_TYPE_PP_FRAG) 3908 page_pool_put_full_page(ring->page_pool, desc_cb->priv, 3909 false); 3910 else /* This page cannot be reused so discard it */ 3911 __page_frag_cache_drain(desc_cb->priv, 3912 desc_cb->pagecnt_bias); 3913 3914 hns3_rx_ring_move_fw(ring); 3915 return 0; 3916 } 3917 3918 if (ring->page_pool) 3919 skb_mark_for_recycle(skb); 3920 3921 hns3_ring_stats_update(ring, seg_pkt_cnt); 3922 3923 ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); 3924 __skb_put(skb, ring->pull_len); 3925 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, 3926 desc_cb); 3927 hns3_rx_ring_move_fw(ring); 3928 3929 return 0; 3930 } 3931 3932 static int hns3_add_frag(struct hns3_enet_ring *ring) 3933 { 3934 struct sk_buff *skb = ring->skb; 3935 struct sk_buff *head_skb = skb; 3936 struct sk_buff *new_skb; 3937 struct hns3_desc_cb *desc_cb; 3938 struct hns3_desc *desc; 3939 u32 bd_base_info; 3940 3941 do { 3942 desc = &ring->desc[ring->next_to_clean]; 3943 desc_cb = &ring->desc_cb[ring->next_to_clean]; 3944 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 3945 /* make sure HW write desc complete */ 3946 dma_rmb(); 3947 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B))) 3948 return -ENXIO; 3949 3950 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { 3951 new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0); 3952 if (unlikely(!new_skb)) { 3953 hns3_rl_err(ring_to_netdev(ring), 3954 "alloc rx fraglist skb fail\n"); 
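/* Leave the partially assembled skb on the ring; returning -ENXIO
 * makes the caller end this poll and resume frag collection on the
 * next one.
 */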
3955 return -ENXIO; 3956 } 3957 3958 if (ring->page_pool) 3959 skb_mark_for_recycle(new_skb); 3960 3961 ring->frag_num = 0; 3962 3963 if (ring->tail_skb) { 3964 ring->tail_skb->next = new_skb; 3965 ring->tail_skb = new_skb; 3966 } else { 3967 skb_shinfo(skb)->frag_list = new_skb; 3968 ring->tail_skb = new_skb; 3969 } 3970 } 3971 3972 if (ring->tail_skb) { 3973 head_skb->truesize += hns3_buf_size(ring); 3974 head_skb->data_len += le16_to_cpu(desc->rx.size); 3975 head_skb->len += le16_to_cpu(desc->rx.size); 3976 skb = ring->tail_skb; 3977 } 3978 3979 dma_sync_single_for_cpu(ring_to_dev(ring), 3980 desc_cb->dma + desc_cb->page_offset, 3981 hns3_buf_size(ring), 3982 DMA_FROM_DEVICE); 3983 3984 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); 3985 trace_hns3_rx_desc(ring); 3986 hns3_rx_ring_move_fw(ring); 3987 ring->pending_buf++; 3988 } while (!(bd_base_info & BIT(HNS3_RXD_FE_B))); 3989 3990 return 0; 3991 } 3992 3993 static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, 3994 struct sk_buff *skb, u32 l234info, 3995 u32 bd_base_info, u32 ol_info, u16 csum) 3996 { 3997 struct net_device *netdev = ring_to_netdev(ring); 3998 struct hns3_nic_priv *priv = netdev_priv(netdev); 3999 u32 l3_type; 4000 4001 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, 4002 HNS3_RXD_GRO_SIZE_M, 4003 HNS3_RXD_GRO_SIZE_S); 4004 /* if there is no HW GRO, do not set gro params */ 4005 if (!skb_shinfo(skb)->gso_size) { 4006 hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info, 4007 csum); 4008 return 0; 4009 } 4010 4011 NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info, 4012 HNS3_RXD_GRO_COUNT_M, 4013 HNS3_RXD_GRO_COUNT_S); 4014 4015 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { 4016 u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M, 4017 HNS3_RXD_PTYPE_S); 4018 4019 l3_type = hns3_rx_ptype_tbl[ptype].l3_type; 4020 } else { 4021 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, 4022 HNS3_RXD_L3ID_S); 4023 } 4024 4025 if (l3_type == HNS3_L3_TYPE_IPV4) 4026 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 4027 else if (l3_type == HNS3_L3_TYPE_IPV6) 4028 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 4029 else 4030 return -EFAULT; 4031 4032 return hns3_gro_complete(skb, l234info); 4033 } 4034 4035 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, 4036 struct sk_buff *skb, u32 rss_hash) 4037 { 4038 struct hnae3_handle *handle = ring->tqp->handle; 4039 enum pkt_hash_types rss_type; 4040 4041 if (rss_hash) 4042 rss_type = handle->kinfo.rss_type; 4043 else 4044 rss_type = PKT_HASH_TYPE_NONE; 4045 4046 skb_set_hash(skb, rss_hash, rss_type); 4047 } 4048 4049 static void hns3_handle_rx_ts_info(struct net_device *netdev, 4050 struct hns3_desc *desc, struct sk_buff *skb, 4051 u32 bd_base_info) 4052 { 4053 if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) { 4054 struct hnae3_handle *h = hns3_get_handle(netdev); 4055 u32 nsec = le32_to_cpu(desc->ts_nsec); 4056 u32 sec = le32_to_cpu(desc->ts_sec); 4057 4058 if (h->ae_algo->ops->get_rx_hwts) 4059 h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec); 4060 } 4061 } 4062 4063 static void hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring, 4064 struct hns3_desc *desc, struct sk_buff *skb, 4065 u32 l234info) 4066 { 4067 struct net_device *netdev = ring_to_netdev(ring); 4068 4069 /* Based on hw strategy, the tag offloaded will be stored at 4070 * ot_vlan_tag in two layer tag case, and stored at vlan_tag 4071 * in one layer tag case. 
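* hns3_parse_vlan_tag() resolves which field to report, taking the
* strip mode and the port-based VLAN state into account.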
4072 */ 4073 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { 4074 u16 vlan_tag; 4075 4076 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) 4077 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 4078 vlan_tag); 4079 } 4080 } 4081 4082 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) 4083 { 4084 struct net_device *netdev = ring_to_netdev(ring); 4085 enum hns3_pkt_l2t_type l2_frame_type; 4086 u32 bd_base_info, l234info, ol_info; 4087 struct hns3_desc *desc; 4088 unsigned int len; 4089 int pre_ntc, ret; 4090 u16 csum; 4091 4092 /* bdinfo handled below is only valid on the last BD of the 4093 * current packet, and ring->next_to_clean indicates the first 4094 * descriptor of next packet, so need - 1 below. 4095 */ 4096 pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : 4097 (ring->desc_num - 1); 4098 desc = &ring->desc[pre_ntc]; 4099 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 4100 l234info = le32_to_cpu(desc->rx.l234_info); 4101 ol_info = le32_to_cpu(desc->rx.ol_info); 4102 csum = le16_to_cpu(desc->csum); 4103 4104 hns3_handle_rx_ts_info(netdev, desc, skb, bd_base_info); 4105 4106 hns3_handle_rx_vlan_tag(ring, desc, skb, l234info); 4107 4108 if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | 4109 BIT(HNS3_RXD_L2E_B))))) { 4110 u64_stats_update_begin(&ring->syncp); 4111 if (l234info & BIT(HNS3_RXD_L2E_B)) 4112 ring->stats.l2_err++; 4113 else 4114 ring->stats.err_pkt_len++; 4115 u64_stats_update_end(&ring->syncp); 4116 4117 return -EFAULT; 4118 } 4119 4120 len = skb->len; 4121 4122 /* Do update ip stack process */ 4123 skb->protocol = eth_type_trans(skb, netdev); 4124 4125 /* This is needed in order to enable forwarding support */ 4126 ret = hns3_set_gro_and_checksum(ring, skb, l234info, 4127 bd_base_info, ol_info, csum); 4128 if (unlikely(ret)) { 4129 hns3_ring_stats_update(ring, rx_err_cnt); 4130 return ret; 4131 } 4132 4133 l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, 4134 HNS3_RXD_DMAC_S); 4135 4136 u64_stats_update_begin(&ring->syncp); 4137 ring->stats.rx_pkts++; 4138 ring->stats.rx_bytes += len; 4139 4140 if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) 4141 ring->stats.rx_multicast++; 4142 4143 u64_stats_update_end(&ring->syncp); 4144 4145 ring->tqp_vector->rx_group.total_bytes += len; 4146 4147 hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); 4148 return 0; 4149 } 4150 4151 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) 4152 { 4153 struct sk_buff *skb = ring->skb; 4154 struct hns3_desc_cb *desc_cb; 4155 struct hns3_desc *desc; 4156 unsigned int length; 4157 u32 bd_base_info; 4158 int ret; 4159 4160 desc = &ring->desc[ring->next_to_clean]; 4161 desc_cb = &ring->desc_cb[ring->next_to_clean]; 4162 4163 prefetch(desc); 4164 4165 if (!skb) { 4166 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 4167 /* Check valid BD */ 4168 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) 4169 return -ENXIO; 4170 4171 dma_rmb(); 4172 length = le16_to_cpu(desc->rx.size); 4173 4174 ring->va = desc_cb->buf + desc_cb->page_offset; 4175 4176 dma_sync_single_for_cpu(ring_to_dev(ring), 4177 desc_cb->dma + desc_cb->page_offset, 4178 hns3_buf_size(ring), 4179 DMA_FROM_DEVICE); 4180 4181 /* Prefetch first cache line of first page. 4182 * Idea is to cache few bytes of the header of the packet. 4183 * Our L1 Cache line size is 64B so need to prefetch twice to make 4184 * it 128B. But in actual we can have greater size of caches with 4185 * 128B Level 1 cache lines. 
In such a case, single fetch would 4186 * suffice to cache in the relevant part of the header. 4187 */ 4188 net_prefetch(ring->va); 4189 4190 ret = hns3_alloc_skb(ring, length, ring->va); 4191 skb = ring->skb; 4192 4193 if (ret < 0) /* alloc buffer fail */ 4194 return ret; 4195 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */ 4196 ret = hns3_add_frag(ring); 4197 if (ret) 4198 return ret; 4199 } 4200 } else { 4201 ret = hns3_add_frag(ring); 4202 if (ret) 4203 return ret; 4204 } 4205 4206 /* As the head data may be changed when GRO enable, copy 4207 * the head data in after other data rx completed 4208 */ 4209 if (skb->len > HNS3_RX_HEAD_SIZE) 4210 memcpy(skb->data, ring->va, 4211 ALIGN(ring->pull_len, sizeof(long))); 4212 4213 ret = hns3_handle_bdinfo(ring, skb); 4214 if (unlikely(ret)) { 4215 dev_kfree_skb_any(skb); 4216 return ret; 4217 } 4218 4219 skb_record_rx_queue(skb, ring->tqp->tqp_index); 4220 return 0; 4221 } 4222 4223 int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, 4224 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) 4225 { 4226 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 4227 int unused_count = hns3_desc_unused(ring); 4228 bool failure = false; 4229 int recv_pkts = 0; 4230 int err; 4231 4232 unused_count -= ring->pending_buf; 4233 4234 while (recv_pkts < budget) { 4235 /* Reuse or realloc buffers */ 4236 if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { 4237 failure = failure || 4238 hns3_nic_alloc_rx_buffers(ring, unused_count); 4239 unused_count = 0; 4240 } 4241 4242 /* Poll one pkt */ 4243 err = hns3_handle_rx_bd(ring); 4244 /* Do not get FE for the packet or failed to alloc skb */ 4245 if (unlikely(!ring->skb || err == -ENXIO)) { 4246 goto out; 4247 } else if (likely(!err)) { 4248 rx_fn(ring, ring->skb); 4249 recv_pkts++; 4250 } 4251 4252 unused_count += ring->pending_buf; 4253 ring->skb = NULL; 4254 ring->pending_buf = 0; 4255 } 4256 4257 out: 4258 /* sync head pointer before exiting, since hardware will calculate 4259 * FBD number with head pointer 4260 */ 4261 if (unused_count > 0) 4262 failure = failure || 4263 hns3_nic_alloc_rx_buffers(ring, unused_count); 4264 4265 return failure ? 
budget : recv_pkts; 4266 } 4267 4268 static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector) 4269 { 4270 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; 4271 struct dim_sample sample = {}; 4272 4273 if (!rx_group->coal.adapt_enable) 4274 return; 4275 4276 dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets, 4277 rx_group->total_bytes, &sample); 4278 net_dim(&rx_group->dim, sample); 4279 } 4280 4281 static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector) 4282 { 4283 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; 4284 struct dim_sample sample = {}; 4285 4286 if (!tx_group->coal.adapt_enable) 4287 return; 4288 4289 dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets, 4290 tx_group->total_bytes, &sample); 4291 net_dim(&tx_group->dim, sample); 4292 } 4293 4294 static int hns3_nic_common_poll(struct napi_struct *napi, int budget) 4295 { 4296 struct hns3_nic_priv *priv = netdev_priv(napi->dev); 4297 struct hns3_enet_ring *ring; 4298 int rx_pkt_total = 0; 4299 4300 struct hns3_enet_tqp_vector *tqp_vector = 4301 container_of(napi, struct hns3_enet_tqp_vector, napi); 4302 bool clean_complete = true; 4303 int rx_budget = budget; 4304 4305 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { 4306 napi_complete(napi); 4307 return 0; 4308 } 4309 4310 /* Since the actual Tx work is minimal, we can give the Tx a larger 4311 * budget and be more aggressive about cleaning up the Tx descriptors. 4312 */ 4313 hns3_for_each_ring(ring, tqp_vector->tx_group) 4314 hns3_clean_tx_ring(ring, budget); 4315 4316 /* make sure rx ring budget not smaller than 1 */ 4317 if (tqp_vector->num_tqps > 1) 4318 rx_budget = max(budget / tqp_vector->num_tqps, 1); 4319 4320 hns3_for_each_ring(ring, tqp_vector->rx_group) { 4321 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, 4322 hns3_rx_skb); 4323 if (rx_cleaned >= rx_budget) 4324 clean_complete = false; 4325 4326 rx_pkt_total += rx_cleaned; 4327 } 4328 4329 tqp_vector->rx_group.total_packets += rx_pkt_total; 4330 4331 if (!clean_complete) 4332 return budget; 4333 4334 if (napi_complete(napi) && 4335 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { 4336 hns3_update_rx_int_coalesce(tqp_vector); 4337 hns3_update_tx_int_coalesce(tqp_vector); 4338 4339 hns3_mask_vector_irq(tqp_vector, 1); 4340 } 4341 4342 return rx_pkt_total; 4343 } 4344 4345 static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 4346 struct hnae3_ring_chain_node **head, 4347 bool is_tx) 4348 { 4349 u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX; 4350 u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX; 4351 struct hnae3_ring_chain_node *cur_chain = *head; 4352 struct pci_dev *pdev = tqp_vector->handle->pdev; 4353 struct hnae3_ring_chain_node *chain; 4354 struct hns3_enet_ring *ring; 4355 4356 ring = is_tx ? 
tqp_vector->tx_group.ring : tqp_vector->rx_group.ring; 4357 4358 if (cur_chain) { 4359 while (cur_chain->next) 4360 cur_chain = cur_chain->next; 4361 } 4362 4363 while (ring) { 4364 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); 4365 if (!chain) 4366 return -ENOMEM; 4367 if (cur_chain) 4368 cur_chain->next = chain; 4369 else 4370 *head = chain; 4371 chain->tqp_index = ring->tqp->tqp_index; 4372 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 4373 bit_value); 4374 hnae3_set_field(chain->int_gl_idx, 4375 HNAE3_RING_GL_IDX_M, 4376 HNAE3_RING_GL_IDX_S, field_value); 4377 4378 cur_chain = chain; 4379 4380 ring = ring->next; 4381 } 4382 4383 return 0; 4384 } 4385 4386 static struct hnae3_ring_chain_node * 4387 hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector) 4388 { 4389 struct pci_dev *pdev = tqp_vector->handle->pdev; 4390 struct hnae3_ring_chain_node *cur_chain = NULL; 4391 struct hnae3_ring_chain_node *chain; 4392 4393 if (hns3_create_ring_chain(tqp_vector, &cur_chain, true)) 4394 goto err_free_chain; 4395 4396 if (hns3_create_ring_chain(tqp_vector, &cur_chain, false)) 4397 goto err_free_chain; 4398 4399 return cur_chain; 4400 4401 err_free_chain: 4402 while (cur_chain) { 4403 chain = cur_chain->next; 4404 devm_kfree(&pdev->dev, cur_chain); 4405 cur_chain = chain; 4406 } 4407 4408 return NULL; 4409 } 4410 4411 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 4412 struct hnae3_ring_chain_node *head) 4413 { 4414 struct pci_dev *pdev = tqp_vector->handle->pdev; 4415 struct hnae3_ring_chain_node *chain_tmp, *chain; 4416 4417 chain = head; 4418 4419 while (chain) { 4420 chain_tmp = chain->next; 4421 devm_kfree(&pdev->dev, chain); 4422 chain = chain_tmp; 4423 } 4424 } 4425 4426 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, 4427 struct hns3_enet_ring *ring) 4428 { 4429 ring->next = group->ring; 4430 group->ring = ring; 4431 4432 group->count++; 4433 } 4434 4435 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) 4436 { 4437 struct pci_dev *pdev = priv->ae_handle->pdev; 4438 struct hns3_enet_tqp_vector *tqp_vector; 4439 int num_vectors = priv->vector_num; 4440 int numa_node; 4441 int vector_i; 4442 4443 numa_node = dev_to_node(&pdev->dev); 4444 4445 for (vector_i = 0; vector_i < num_vectors; vector_i++) { 4446 tqp_vector = &priv->tqp_vector[vector_i]; 4447 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), 4448 &tqp_vector->affinity_mask); 4449 } 4450 } 4451 4452 static void hns3_rx_dim_work(struct work_struct *work) 4453 { 4454 struct dim *dim = container_of(work, struct dim, work); 4455 struct hns3_enet_ring_group *group = container_of(dim, 4456 struct hns3_enet_ring_group, dim); 4457 struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector; 4458 struct dim_cq_moder cur_moder = 4459 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 4460 4461 hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec); 4462 tqp_vector->rx_group.coal.int_gl = cur_moder.usec; 4463 4464 if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) { 4465 hns3_set_vector_coalesce_rx_ql(tqp_vector, cur_moder.pkts); 4466 tqp_vector->rx_group.coal.int_ql = cur_moder.pkts; 4467 } 4468 4469 dim->state = DIM_START_MEASURE; 4470 } 4471 4472 static void hns3_tx_dim_work(struct work_struct *work) 4473 { 4474 struct dim *dim = container_of(work, struct dim, work); 4475 struct hns3_enet_ring_group *group = container_of(dim, 4476 struct hns3_enet_ring_group, dim); 4477 struct hns3_enet_tqp_vector 
*tqp_vector = group->ring->tqp_vector; 4478 struct dim_cq_moder cur_moder = 4479 net_dim_get_tx_moderation(dim->mode, dim->profile_ix); 4480 4481 hns3_set_vector_coalesce_tx_gl(tqp_vector, cur_moder.usec); 4482 tqp_vector->tx_group.coal.int_gl = cur_moder.usec; 4483 4484 if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) { 4485 hns3_set_vector_coalesce_tx_ql(tqp_vector, cur_moder.pkts); 4486 tqp_vector->tx_group.coal.int_ql = cur_moder.pkts; 4487 } 4488 4489 dim->state = DIM_START_MEASURE; 4490 } 4491 4492 static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector) 4493 { 4494 INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work); 4495 INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work); 4496 } 4497 4498 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) 4499 { 4500 struct hnae3_handle *h = priv->ae_handle; 4501 struct hns3_enet_tqp_vector *tqp_vector; 4502 int ret; 4503 int i; 4504 4505 hns3_nic_set_cpumask(priv); 4506 4507 for (i = 0; i < priv->vector_num; i++) { 4508 tqp_vector = &priv->tqp_vector[i]; 4509 hns3_vector_coalesce_init_hw(tqp_vector, priv); 4510 tqp_vector->num_tqps = 0; 4511 hns3_nic_init_dim(tqp_vector); 4512 } 4513 4514 for (i = 0; i < h->kinfo.num_tqps; i++) { 4515 u16 vector_i = i % priv->vector_num; 4516 u16 tqp_num = h->kinfo.num_tqps; 4517 4518 tqp_vector = &priv->tqp_vector[vector_i]; 4519 4520 hns3_add_ring_to_group(&tqp_vector->tx_group, 4521 &priv->ring[i]); 4522 4523 hns3_add_ring_to_group(&tqp_vector->rx_group, 4524 &priv->ring[i + tqp_num]); 4525 4526 priv->ring[i].tqp_vector = tqp_vector; 4527 priv->ring[i + tqp_num].tqp_vector = tqp_vector; 4528 tqp_vector->num_tqps++; 4529 } 4530 4531 for (i = 0; i < priv->vector_num; i++) { 4532 struct hnae3_ring_chain_node *vector_ring_chain; 4533 4534 tqp_vector = &priv->tqp_vector[i]; 4535 4536 tqp_vector->rx_group.total_bytes = 0; 4537 tqp_vector->rx_group.total_packets = 0; 4538 tqp_vector->tx_group.total_bytes = 0; 4539 tqp_vector->tx_group.total_packets = 0; 4540 tqp_vector->handle = h; 4541 4542 vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector); 4543 if (!vector_ring_chain) { 4544 ret = -ENOMEM; 4545 goto map_ring_fail; 4546 } 4547 4548 ret = h->ae_algo->ops->map_ring_to_vector(h, 4549 tqp_vector->vector_irq, vector_ring_chain); 4550 4551 hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain); 4552 4553 if (ret) 4554 goto map_ring_fail; 4555 4556 netif_napi_add(priv->netdev, &tqp_vector->napi, 4557 hns3_nic_common_poll, NAPI_POLL_WEIGHT); 4558 } 4559 4560 return 0; 4561 4562 map_ring_fail: 4563 while (i--) 4564 netif_napi_del(&priv->tqp_vector[i].napi); 4565 4566 return ret; 4567 } 4568 4569 static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv) 4570 { 4571 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); 4572 struct hns3_enet_coalesce *tx_coal = &priv->tx_coal; 4573 struct hns3_enet_coalesce *rx_coal = &priv->rx_coal; 4574 4575 /* initialize the configuration for interrupt coalescing. 4576 * 1. GL (Interrupt Gap Limiter) 4577 * 2. RL (Interrupt Rate Limiter) 4578 * 3. 
QL (Interrupt Quantity Limiter) 4579 * 4580 * Default: enable interrupt coalescing self-adaptive and GL 4581 */ 4582 tx_coal->adapt_enable = 1; 4583 rx_coal->adapt_enable = 1; 4584 4585 tx_coal->int_gl = HNS3_INT_GL_50K; 4586 rx_coal->int_gl = HNS3_INT_GL_50K; 4587 4588 rx_coal->flow_level = HNS3_FLOW_LOW; 4589 tx_coal->flow_level = HNS3_FLOW_LOW; 4590 4591 if (ae_dev->dev_specs.int_ql_max) { 4592 tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; 4593 rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; 4594 } 4595 } 4596 4597 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) 4598 { 4599 struct hnae3_handle *h = priv->ae_handle; 4600 struct hns3_enet_tqp_vector *tqp_vector; 4601 struct hnae3_vector_info *vector; 4602 struct pci_dev *pdev = h->pdev; 4603 u16 tqp_num = h->kinfo.num_tqps; 4604 u16 vector_num; 4605 int ret = 0; 4606 u16 i; 4607 4608 /* RSS size, cpu online and vector_num should be the same */ 4609 /* Should consider 2p/4p later */ 4610 vector_num = min_t(u16, num_online_cpus(), tqp_num); 4611 4612 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), 4613 GFP_KERNEL); 4614 if (!vector) 4615 return -ENOMEM; 4616 4617 /* save the actual available vector number */ 4618 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); 4619 4620 priv->vector_num = vector_num; 4621 priv->tqp_vector = (struct hns3_enet_tqp_vector *) 4622 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), 4623 GFP_KERNEL); 4624 if (!priv->tqp_vector) { 4625 ret = -ENOMEM; 4626 goto out; 4627 } 4628 4629 for (i = 0; i < priv->vector_num; i++) { 4630 tqp_vector = &priv->tqp_vector[i]; 4631 tqp_vector->idx = i; 4632 tqp_vector->mask_addr = vector[i].io_addr; 4633 tqp_vector->vector_irq = vector[i].vector; 4634 hns3_vector_coalesce_init(tqp_vector, priv); 4635 } 4636 4637 out: 4638 devm_kfree(&pdev->dev, vector); 4639 return ret; 4640 } 4641 4642 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) 4643 { 4644 group->ring = NULL; 4645 group->count = 0; 4646 } 4647 4648 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) 4649 { 4650 struct hnae3_ring_chain_node *vector_ring_chain; 4651 struct hnae3_handle *h = priv->ae_handle; 4652 struct hns3_enet_tqp_vector *tqp_vector; 4653 int i; 4654 4655 for (i = 0; i < priv->vector_num; i++) { 4656 tqp_vector = &priv->tqp_vector[i]; 4657 4658 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) 4659 continue; 4660 4661 /* Since the mapping can be overwritten, when fail to get the 4662 * chain between vector and ring, we should go on to deal with 4663 * the remaining options. 
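* A failed chain allocation is therefore only reported with dev_warn()
* below, and teardown of this vector and of the remaining vectors still
* proceeds.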
4664 */ 4665 vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector); 4666 if (!vector_ring_chain) 4667 dev_warn(priv->dev, "failed to get ring chain\n"); 4668 4669 h->ae_algo->ops->unmap_ring_from_vector(h, 4670 tqp_vector->vector_irq, vector_ring_chain); 4671 4672 hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain); 4673 4674 hns3_clear_ring_group(&tqp_vector->rx_group); 4675 hns3_clear_ring_group(&tqp_vector->tx_group); 4676 netif_napi_del(&priv->tqp_vector[i].napi); 4677 } 4678 } 4679 4680 static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) 4681 { 4682 struct hnae3_handle *h = priv->ae_handle; 4683 struct pci_dev *pdev = h->pdev; 4684 int i, ret; 4685 4686 for (i = 0; i < priv->vector_num; i++) { 4687 struct hns3_enet_tqp_vector *tqp_vector; 4688 4689 tqp_vector = &priv->tqp_vector[i]; 4690 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); 4691 if (ret) 4692 return; 4693 } 4694 4695 devm_kfree(&pdev->dev, priv->tqp_vector); 4696 } 4697 4698 static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, 4699 unsigned int ring_type) 4700 { 4701 int queue_num = priv->ae_handle->kinfo.num_tqps; 4702 struct hns3_enet_ring *ring; 4703 int desc_num; 4704 4705 if (ring_type == HNAE3_RING_TYPE_TX) { 4706 ring = &priv->ring[q->tqp_index]; 4707 desc_num = priv->ae_handle->kinfo.num_tx_desc; 4708 ring->queue_index = q->tqp_index; 4709 ring->tx_copybreak = priv->tx_copybreak; 4710 ring->last_to_use = 0; 4711 } else { 4712 ring = &priv->ring[q->tqp_index + queue_num]; 4713 desc_num = priv->ae_handle->kinfo.num_rx_desc; 4714 ring->queue_index = q->tqp_index; 4715 ring->rx_copybreak = priv->rx_copybreak; 4716 } 4717 4718 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); 4719 4720 ring->tqp = q; 4721 ring->desc = NULL; 4722 ring->desc_cb = NULL; 4723 ring->dev = priv->dev; 4724 ring->desc_dma_addr = 0; 4725 ring->buf_size = q->buf_size; 4726 ring->desc_num = desc_num; 4727 ring->next_to_use = 0; 4728 ring->next_to_clean = 0; 4729 } 4730 4731 static void hns3_queue_to_ring(struct hnae3_queue *tqp, 4732 struct hns3_nic_priv *priv) 4733 { 4734 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); 4735 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); 4736 } 4737 4738 static int hns3_get_ring_config(struct hns3_nic_priv *priv) 4739 { 4740 struct hnae3_handle *h = priv->ae_handle; 4741 struct pci_dev *pdev = h->pdev; 4742 int i; 4743 4744 priv->ring = devm_kzalloc(&pdev->dev, 4745 array3_size(h->kinfo.num_tqps, 4746 sizeof(*priv->ring), 2), 4747 GFP_KERNEL); 4748 if (!priv->ring) 4749 return -ENOMEM; 4750 4751 for (i = 0; i < h->kinfo.num_tqps; i++) 4752 hns3_queue_to_ring(h->kinfo.tqp[i], priv); 4753 4754 return 0; 4755 } 4756 4757 static void hns3_put_ring_config(struct hns3_nic_priv *priv) 4758 { 4759 if (!priv->ring) 4760 return; 4761 4762 devm_kfree(priv->dev, priv->ring); 4763 priv->ring = NULL; 4764 } 4765 4766 static void hns3_alloc_page_pool(struct hns3_enet_ring *ring) 4767 { 4768 struct page_pool_params pp_params = { 4769 .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG | 4770 PP_FLAG_DMA_SYNC_DEV, 4771 .order = hns3_page_order(ring), 4772 .pool_size = ring->desc_num * hns3_buf_size(ring) / 4773 (PAGE_SIZE << hns3_page_order(ring)), 4774 .nid = dev_to_node(ring_to_dev(ring)), 4775 .dev = ring_to_dev(ring), 4776 .dma_dir = DMA_FROM_DEVICE, 4777 .offset = 0, 4778 .max_len = PAGE_SIZE << hns3_page_order(ring), 4779 }; 4780 4781 ring->page_pool = page_pool_create(&pp_params); 4782 if (IS_ERR(ring->page_pool)) { 4783 dev_warn(ring_to_dev(ring), 
"page pool creation failed: %ld\n", 4784 PTR_ERR(ring->page_pool)); 4785 ring->page_pool = NULL; 4786 } 4787 } 4788 4789 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) 4790 { 4791 int ret; 4792 4793 if (ring->desc_num <= 0 || ring->buf_size <= 0) 4794 return -EINVAL; 4795 4796 ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, 4797 sizeof(ring->desc_cb[0]), GFP_KERNEL); 4798 if (!ring->desc_cb) { 4799 ret = -ENOMEM; 4800 goto out; 4801 } 4802 4803 ret = hns3_alloc_desc(ring); 4804 if (ret) 4805 goto out_with_desc_cb; 4806 4807 if (!HNAE3_IS_TX_RING(ring)) { 4808 if (page_pool_enabled) 4809 hns3_alloc_page_pool(ring); 4810 4811 ret = hns3_alloc_ring_buffers(ring); 4812 if (ret) 4813 goto out_with_desc; 4814 } else { 4815 hns3_init_tx_spare_buffer(ring); 4816 } 4817 4818 return 0; 4819 4820 out_with_desc: 4821 hns3_free_desc(ring); 4822 out_with_desc_cb: 4823 devm_kfree(ring_to_dev(ring), ring->desc_cb); 4824 ring->desc_cb = NULL; 4825 out: 4826 return ret; 4827 } 4828 4829 void hns3_fini_ring(struct hns3_enet_ring *ring) 4830 { 4831 hns3_free_desc(ring); 4832 devm_kfree(ring_to_dev(ring), ring->desc_cb); 4833 ring->desc_cb = NULL; 4834 ring->next_to_clean = 0; 4835 ring->next_to_use = 0; 4836 ring->last_to_use = 0; 4837 ring->pending_buf = 0; 4838 if (!HNAE3_IS_TX_RING(ring) && ring->skb) { 4839 dev_kfree_skb_any(ring->skb); 4840 ring->skb = NULL; 4841 } else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) { 4842 struct hns3_tx_spare *tx_spare = ring->tx_spare; 4843 4844 dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len, 4845 DMA_TO_DEVICE); 4846 free_pages((unsigned long)tx_spare->buf, 4847 get_order(tx_spare->len)); 4848 devm_kfree(ring_to_dev(ring), tx_spare); 4849 ring->tx_spare = NULL; 4850 } 4851 4852 if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) { 4853 page_pool_destroy(ring->page_pool); 4854 ring->page_pool = NULL; 4855 } 4856 } 4857 4858 static int hns3_buf_size2type(u32 buf_size) 4859 { 4860 int bd_size_type; 4861 4862 switch (buf_size) { 4863 case 512: 4864 bd_size_type = HNS3_BD_SIZE_512_TYPE; 4865 break; 4866 case 1024: 4867 bd_size_type = HNS3_BD_SIZE_1024_TYPE; 4868 break; 4869 case 2048: 4870 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 4871 break; 4872 case 4096: 4873 bd_size_type = HNS3_BD_SIZE_4096_TYPE; 4874 break; 4875 default: 4876 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 4877 } 4878 4879 return bd_size_type; 4880 } 4881 4882 static void hns3_init_ring_hw(struct hns3_enet_ring *ring) 4883 { 4884 dma_addr_t dma = ring->desc_dma_addr; 4885 struct hnae3_queue *q = ring->tqp; 4886 4887 if (!HNAE3_IS_TX_RING(ring)) { 4888 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma); 4889 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, 4890 (u32)((dma >> 31) >> 1)); 4891 4892 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, 4893 hns3_buf_size2type(ring->buf_size)); 4894 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, 4895 ring->desc_num / 8 - 1); 4896 } else { 4897 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, 4898 (u32)dma); 4899 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, 4900 (u32)((dma >> 31) >> 1)); 4901 4902 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, 4903 ring->desc_num / 8 - 1); 4904 } 4905 } 4906 4907 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) 4908 { 4909 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 4910 struct hnae3_tc_info *tc_info = &kinfo->tc_info; 4911 int i; 4912 4913 for (i = 0; i < tc_info->num_tc; i++) { 4914 int j; 4915 4916 for (j = 0; j < tc_info->tqp_count[i]; 
j++) { 4917 struct hnae3_queue *q; 4918 4919 q = priv->ring[tc_info->tqp_offset[i] + j].tqp; 4920 hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i); 4921 } 4922 } 4923 } 4924 4925 int hns3_init_all_ring(struct hns3_nic_priv *priv) 4926 { 4927 struct hnae3_handle *h = priv->ae_handle; 4928 int ring_num = h->kinfo.num_tqps * 2; 4929 int i, j; 4930 int ret; 4931 4932 for (i = 0; i < ring_num; i++) { 4933 ret = hns3_alloc_ring_memory(&priv->ring[i]); 4934 if (ret) { 4935 dev_err(priv->dev, 4936 "Alloc ring memory fail! ret=%d\n", ret); 4937 goto out_when_alloc_ring_memory; 4938 } 4939 4940 u64_stats_init(&priv->ring[i].syncp); 4941 } 4942 4943 return 0; 4944 4945 out_when_alloc_ring_memory: 4946 for (j = i - 1; j >= 0; j--) 4947 hns3_fini_ring(&priv->ring[j]); 4948 4949 return -ENOMEM; 4950 } 4951 4952 static void hns3_uninit_all_ring(struct hns3_nic_priv *priv) 4953 { 4954 struct hnae3_handle *h = priv->ae_handle; 4955 int i; 4956 4957 for (i = 0; i < h->kinfo.num_tqps; i++) { 4958 hns3_fini_ring(&priv->ring[i]); 4959 hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]); 4960 } 4961 } 4962 4963 /* Set mac addr if it is configured. or leave it to the AE driver */ 4964 static int hns3_init_mac_addr(struct net_device *netdev) 4965 { 4966 struct hns3_nic_priv *priv = netdev_priv(netdev); 4967 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 4968 struct hnae3_handle *h = priv->ae_handle; 4969 u8 mac_addr_temp[ETH_ALEN]; 4970 int ret = 0; 4971 4972 if (h->ae_algo->ops->get_mac_addr) 4973 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); 4974 4975 /* Check if the MAC address is valid, if not get a random one */ 4976 if (!is_valid_ether_addr(mac_addr_temp)) { 4977 eth_hw_addr_random(netdev); 4978 hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr); 4979 dev_warn(priv->dev, "using random MAC address %s\n", 4980 format_mac_addr); 4981 } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { 4982 eth_hw_addr_set(netdev, mac_addr_temp); 4983 ether_addr_copy(netdev->perm_addr, mac_addr_temp); 4984 } else { 4985 return 0; 4986 } 4987 4988 if (h->ae_algo->ops->set_mac_addr) 4989 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); 4990 4991 return ret; 4992 } 4993 4994 static int hns3_init_phy(struct net_device *netdev) 4995 { 4996 struct hnae3_handle *h = hns3_get_handle(netdev); 4997 int ret = 0; 4998 4999 if (h->ae_algo->ops->mac_connect_phy) 5000 ret = h->ae_algo->ops->mac_connect_phy(h); 5001 5002 return ret; 5003 } 5004 5005 static void hns3_uninit_phy(struct net_device *netdev) 5006 { 5007 struct hnae3_handle *h = hns3_get_handle(netdev); 5008 5009 if (h->ae_algo->ops->mac_disconnect_phy) 5010 h->ae_algo->ops->mac_disconnect_phy(h); 5011 } 5012 5013 static int hns3_client_start(struct hnae3_handle *handle) 5014 { 5015 if (!handle->ae_algo->ops->client_start) 5016 return 0; 5017 5018 return handle->ae_algo->ops->client_start(handle); 5019 } 5020 5021 static void hns3_client_stop(struct hnae3_handle *handle) 5022 { 5023 if (!handle->ae_algo->ops->client_stop) 5024 return; 5025 5026 handle->ae_algo->ops->client_stop(handle); 5027 } 5028 5029 static void hns3_info_show(struct hns3_nic_priv *priv) 5030 { 5031 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 5032 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 5033 5034 hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr); 5035 dev_info(priv->dev, "MAC address: %s\n", format_mac_addr); 5036 dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); 5037 dev_info(priv->dev, "RSS size: %u\n", 
kinfo->rss_size); 5038 dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); 5039 dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); 5040 dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); 5041 dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); 5042 dev_info(priv->dev, "Total number of enabled TCs: %u\n", 5043 kinfo->tc_info.num_tc); 5044 dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); 5045 } 5046 5047 static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv, 5048 enum dim_cq_period_mode mode, bool is_tx) 5049 { 5050 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); 5051 struct hnae3_handle *handle = priv->ae_handle; 5052 int i; 5053 5054 if (is_tx) { 5055 priv->tx_cqe_mode = mode; 5056 5057 for (i = 0; i < priv->vector_num; i++) 5058 priv->tqp_vector[i].tx_group.dim.mode = mode; 5059 } else { 5060 priv->rx_cqe_mode = mode; 5061 5062 for (i = 0; i < priv->vector_num; i++) 5063 priv->tqp_vector[i].rx_group.dim.mode = mode; 5064 } 5065 5066 /* only device version above V3(include V3), GL can switch CQ/EQ 5067 * period mode. 5068 */ 5069 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { 5070 u32 new_mode; 5071 u64 reg; 5072 5073 new_mode = (mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) ? 5074 HNS3_CQ_MODE_CQE : HNS3_CQ_MODE_EQE; 5075 reg = is_tx ? HNS3_GL1_CQ_MODE_REG : HNS3_GL0_CQ_MODE_REG; 5076 5077 writel(new_mode, handle->kinfo.io_base + reg); 5078 } 5079 } 5080 5081 void hns3_cq_period_mode_init(struct hns3_nic_priv *priv, 5082 enum dim_cq_period_mode tx_mode, 5083 enum dim_cq_period_mode rx_mode) 5084 { 5085 hns3_set_cq_period_mode(priv, tx_mode, true); 5086 hns3_set_cq_period_mode(priv, rx_mode, false); 5087 } 5088 5089 static void hns3_state_init(struct hnae3_handle *handle) 5090 { 5091 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 5092 struct net_device *netdev = handle->kinfo.netdev; 5093 struct hns3_nic_priv *priv = netdev_priv(netdev); 5094 5095 set_bit(HNS3_NIC_STATE_INITED, &priv->state); 5096 5097 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) 5098 set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); 5099 5100 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) 5101 set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); 5102 5103 if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev)) 5104 set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); 5105 } 5106 5107 static int hns3_client_init(struct hnae3_handle *handle) 5108 { 5109 struct pci_dev *pdev = handle->pdev; 5110 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 5111 u16 alloc_tqps, max_rss_size; 5112 struct hns3_nic_priv *priv; 5113 struct net_device *netdev; 5114 int ret; 5115 5116 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, 5117 &max_rss_size); 5118 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); 5119 if (!netdev) 5120 return -ENOMEM; 5121 5122 priv = netdev_priv(netdev); 5123 priv->dev = &pdev->dev; 5124 priv->netdev = netdev; 5125 priv->ae_handle = handle; 5126 priv->tx_timeout_count = 0; 5127 priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num; 5128 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); 5129 5130 handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); 5131 5132 handle->kinfo.netdev = netdev; 5133 handle->priv = (void *)priv; 5134 5135 hns3_init_mac_addr(netdev); 5136 5137 hns3_set_default_feature(netdev); 5138 5139 netdev->watchdog_timeo = HNS3_TX_TIMEOUT; 5140 netdev->priv_flags |= 
IFF_UNICAST_FLT; 5141 netdev->netdev_ops = &hns3_nic_netdev_ops; 5142 SET_NETDEV_DEV(netdev, &pdev->dev); 5143 hns3_ethtool_set_ops(netdev); 5144 5145 /* Carrier off reporting is important to ethtool even BEFORE open */ 5146 netif_carrier_off(netdev); 5147 5148 ret = hns3_get_ring_config(priv); 5149 if (ret) { 5150 ret = -ENOMEM; 5151 goto out_get_ring_cfg; 5152 } 5153 5154 hns3_nic_init_coal_cfg(priv); 5155 5156 ret = hns3_nic_alloc_vector_data(priv); 5157 if (ret) { 5158 ret = -ENOMEM; 5159 goto out_alloc_vector_data; 5160 } 5161 5162 ret = hns3_nic_init_vector_data(priv); 5163 if (ret) { 5164 ret = -ENOMEM; 5165 goto out_init_vector_data; 5166 } 5167 5168 ret = hns3_init_all_ring(priv); 5169 if (ret) { 5170 ret = -ENOMEM; 5171 goto out_init_ring; 5172 } 5173 5174 hns3_cq_period_mode_init(priv, DIM_CQ_PERIOD_MODE_START_FROM_EQE, 5175 DIM_CQ_PERIOD_MODE_START_FROM_EQE); 5176 5177 ret = hns3_init_phy(netdev); 5178 if (ret) 5179 goto out_init_phy; 5180 5181 /* the device can work without cpu rmap, only aRFS needs it */ 5182 ret = hns3_set_rx_cpu_rmap(netdev); 5183 if (ret) 5184 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); 5185 5186 ret = hns3_nic_init_irq(priv); 5187 if (ret) { 5188 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); 5189 hns3_free_rx_cpu_rmap(netdev); 5190 goto out_init_irq_fail; 5191 } 5192 5193 ret = hns3_client_start(handle); 5194 if (ret) { 5195 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); 5196 goto out_client_start; 5197 } 5198 5199 hns3_dcbnl_setup(handle); 5200 5201 ret = hns3_dbg_init(handle); 5202 if (ret) { 5203 dev_err(priv->dev, "failed to init debugfs, ret = %d\n", 5204 ret); 5205 goto out_client_start; 5206 } 5207 5208 netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size); 5209 5210 hns3_state_init(handle); 5211 5212 ret = register_netdev(netdev); 5213 if (ret) { 5214 dev_err(priv->dev, "probe register netdev fail!\n"); 5215 goto out_reg_netdev_fail; 5216 } 5217 5218 if (netif_msg_drv(handle)) 5219 hns3_info_show(priv); 5220 5221 return ret; 5222 5223 out_reg_netdev_fail: 5224 hns3_dbg_uninit(handle); 5225 out_client_start: 5226 hns3_free_rx_cpu_rmap(netdev); 5227 hns3_nic_uninit_irq(priv); 5228 out_init_irq_fail: 5229 hns3_uninit_phy(netdev); 5230 out_init_phy: 5231 hns3_uninit_all_ring(priv); 5232 out_init_ring: 5233 hns3_nic_uninit_vector_data(priv); 5234 out_init_vector_data: 5235 hns3_nic_dealloc_vector_data(priv); 5236 out_alloc_vector_data: 5237 priv->ring = NULL; 5238 out_get_ring_cfg: 5239 priv->ae_handle = NULL; 5240 free_netdev(netdev); 5241 return ret; 5242 } 5243 5244 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) 5245 { 5246 struct net_device *netdev = handle->kinfo.netdev; 5247 struct hns3_nic_priv *priv = netdev_priv(netdev); 5248 5249 if (netdev->reg_state != NETREG_UNINITIALIZED) 5250 unregister_netdev(netdev); 5251 5252 hns3_client_stop(handle); 5253 5254 hns3_uninit_phy(netdev); 5255 5256 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 5257 netdev_warn(netdev, "already uninitialized\n"); 5258 goto out_netdev_free; 5259 } 5260 5261 hns3_free_rx_cpu_rmap(netdev); 5262 5263 hns3_nic_uninit_irq(priv); 5264 5265 hns3_clear_all_ring(handle, true); 5266 5267 hns3_nic_uninit_vector_data(priv); 5268 5269 hns3_nic_dealloc_vector_data(priv); 5270 5271 hns3_uninit_all_ring(priv); 5272 5273 hns3_put_ring_config(priv); 5274 5275 out_netdev_free: 5276 hns3_dbg_uninit(handle); 5277 free_netdev(netdev); 5278 } 5279 5280 static void hns3_link_status_change(struct hnae3_handle 
*handle, bool linkup) 5281 { 5282 struct net_device *netdev = handle->kinfo.netdev; 5283 5284 if (!netdev) 5285 return; 5286 5287 if (linkup) { 5288 netif_tx_wake_all_queues(netdev); 5289 netif_carrier_on(netdev); 5290 if (netif_msg_link(handle)) 5291 netdev_info(netdev, "link up\n"); 5292 } else { 5293 netif_carrier_off(netdev); 5294 netif_tx_stop_all_queues(netdev); 5295 if (netif_msg_link(handle)) 5296 netdev_info(netdev, "link down\n"); 5297 } 5298 } 5299 5300 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) 5301 { 5302 while (ring->next_to_clean != ring->next_to_use) { 5303 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; 5304 hns3_free_buffer_detach(ring, ring->next_to_clean, 0); 5305 ring_ptr_move_fw(ring, next_to_clean); 5306 } 5307 5308 ring->pending_buf = 0; 5309 } 5310 5311 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) 5312 { 5313 struct hns3_desc_cb res_cbs; 5314 int ret; 5315 5316 while (ring->next_to_use != ring->next_to_clean) { 5317 /* When a buffer is not reused, it's memory has been 5318 * freed in hns3_handle_rx_bd or will be freed by 5319 * stack, so we need to replace the buffer here. 5320 */ 5321 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { 5322 ret = hns3_alloc_and_map_buffer(ring, &res_cbs); 5323 if (ret) { 5324 hns3_ring_stats_update(ring, sw_err_cnt); 5325 /* if alloc new buffer fail, exit directly 5326 * and reclear in up flow. 5327 */ 5328 netdev_warn(ring_to_netdev(ring), 5329 "reserve buffer map failed, ret = %d\n", 5330 ret); 5331 return ret; 5332 } 5333 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); 5334 } 5335 ring_ptr_move_fw(ring, next_to_use); 5336 } 5337 5338 /* Free the pending skb in rx ring */ 5339 if (ring->skb) { 5340 dev_kfree_skb_any(ring->skb); 5341 ring->skb = NULL; 5342 ring->pending_buf = 0; 5343 } 5344 5345 return 0; 5346 } 5347 5348 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring) 5349 { 5350 while (ring->next_to_use != ring->next_to_clean) { 5351 /* When a buffer is not reused, it's memory has been 5352 * freed in hns3_handle_rx_bd or will be freed by 5353 * stack, so only need to unmap the buffer here. 5354 */ 5355 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { 5356 hns3_unmap_buffer(ring, 5357 &ring->desc_cb[ring->next_to_use]); 5358 ring->desc_cb[ring->next_to_use].dma = 0; 5359 } 5360 5361 ring_ptr_move_fw(ring, next_to_use); 5362 } 5363 } 5364 5365 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force) 5366 { 5367 struct net_device *ndev = h->kinfo.netdev; 5368 struct hns3_nic_priv *priv = netdev_priv(ndev); 5369 u32 i; 5370 5371 for (i = 0; i < h->kinfo.num_tqps; i++) { 5372 struct hns3_enet_ring *ring; 5373 5374 ring = &priv->ring[i]; 5375 hns3_clear_tx_ring(ring); 5376 5377 ring = &priv->ring[i + h->kinfo.num_tqps]; 5378 /* Continue to clear other rings even if clearing some 5379 * rings failed. 
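* (hns3_clear_rx_ring() can fail when a replacement RX buffer cannot be
* allocated; that case is re-cleared in the up flow.)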
5380 */ 5381 if (force) 5382 hns3_force_clear_rx_ring(ring); 5383 else 5384 hns3_clear_rx_ring(ring); 5385 } 5386 } 5387 5388 int hns3_nic_reset_all_ring(struct hnae3_handle *h) 5389 { 5390 struct net_device *ndev = h->kinfo.netdev; 5391 struct hns3_nic_priv *priv = netdev_priv(ndev); 5392 struct hns3_enet_ring *rx_ring; 5393 int i, j; 5394 int ret; 5395 5396 ret = h->ae_algo->ops->reset_queue(h); 5397 if (ret) 5398 return ret; 5399 5400 for (i = 0; i < h->kinfo.num_tqps; i++) { 5401 hns3_init_ring_hw(&priv->ring[i]); 5402 5403 /* We need to clear tx ring here because self test will 5404 * use the ring and will not run down before up 5405 */ 5406 hns3_clear_tx_ring(&priv->ring[i]); 5407 priv->ring[i].next_to_clean = 0; 5408 priv->ring[i].next_to_use = 0; 5409 priv->ring[i].last_to_use = 0; 5410 5411 rx_ring = &priv->ring[i + h->kinfo.num_tqps]; 5412 hns3_init_ring_hw(rx_ring); 5413 ret = hns3_clear_rx_ring(rx_ring); 5414 if (ret) 5415 return ret; 5416 5417 /* We can not know the hardware head and tail when this 5418 * function is called in reset flow, so we reuse all desc. 5419 */ 5420 for (j = 0; j < rx_ring->desc_num; j++) 5421 hns3_reuse_buffer(rx_ring, j); 5422 5423 rx_ring->next_to_clean = 0; 5424 rx_ring->next_to_use = 0; 5425 } 5426 5427 hns3_init_tx_ring_tc(priv); 5428 5429 return 0; 5430 } 5431 5432 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) 5433 { 5434 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 5435 struct net_device *ndev = kinfo->netdev; 5436 struct hns3_nic_priv *priv = netdev_priv(ndev); 5437 5438 if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) 5439 return 0; 5440 5441 if (!netif_running(ndev)) 5442 return 0; 5443 5444 return hns3_nic_net_stop(ndev); 5445 } 5446 5447 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) 5448 { 5449 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 5450 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); 5451 int ret = 0; 5452 5453 if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 5454 netdev_err(kinfo->netdev, "device is not initialized yet\n"); 5455 return -EFAULT; 5456 } 5457 5458 clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); 5459 5460 if (netif_running(kinfo->netdev)) { 5461 ret = hns3_nic_net_open(kinfo->netdev); 5462 if (ret) { 5463 set_bit(HNS3_NIC_STATE_RESETTING, &priv->state); 5464 netdev_err(kinfo->netdev, 5465 "net up fail, ret=%d!\n", ret); 5466 return ret; 5467 } 5468 } 5469 5470 return ret; 5471 } 5472 5473 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) 5474 { 5475 struct net_device *netdev = handle->kinfo.netdev; 5476 struct hns3_nic_priv *priv = netdev_priv(netdev); 5477 int ret; 5478 5479 /* Carrier off reporting is important to ethtool even BEFORE open */ 5480 netif_carrier_off(netdev); 5481 5482 ret = hns3_get_ring_config(priv); 5483 if (ret) 5484 return ret; 5485 5486 ret = hns3_nic_alloc_vector_data(priv); 5487 if (ret) 5488 goto err_put_ring; 5489 5490 ret = hns3_nic_init_vector_data(priv); 5491 if (ret) 5492 goto err_dealloc_vector; 5493 5494 ret = hns3_init_all_ring(priv); 5495 if (ret) 5496 goto err_uninit_vector; 5497 5498 hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode); 5499 5500 /* the device can work without cpu rmap, only aRFS needs it */ 5501 ret = hns3_set_rx_cpu_rmap(netdev); 5502 if (ret) 5503 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); 5504 5505 ret = hns3_nic_init_irq(priv); 5506 if (ret) { 5507 dev_err(priv->dev, "init irq failed! 
ret=%d\n", ret); 5508 hns3_free_rx_cpu_rmap(netdev); 5509 goto err_init_irq_fail; 5510 } 5511 5512 if (!hns3_is_phys_func(handle->pdev)) 5513 hns3_init_mac_addr(netdev); 5514 5515 ret = hns3_client_start(handle); 5516 if (ret) { 5517 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); 5518 goto err_client_start_fail; 5519 } 5520 5521 set_bit(HNS3_NIC_STATE_INITED, &priv->state); 5522 5523 return ret; 5524 5525 err_client_start_fail: 5526 hns3_free_rx_cpu_rmap(netdev); 5527 hns3_nic_uninit_irq(priv); 5528 err_init_irq_fail: 5529 hns3_uninit_all_ring(priv); 5530 err_uninit_vector: 5531 hns3_nic_uninit_vector_data(priv); 5532 err_dealloc_vector: 5533 hns3_nic_dealloc_vector_data(priv); 5534 err_put_ring: 5535 hns3_put_ring_config(priv); 5536 5537 return ret; 5538 } 5539 5540 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) 5541 { 5542 struct net_device *netdev = handle->kinfo.netdev; 5543 struct hns3_nic_priv *priv = netdev_priv(netdev); 5544 5545 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 5546 netdev_warn(netdev, "already uninitialized\n"); 5547 return 0; 5548 } 5549 5550 hns3_free_rx_cpu_rmap(netdev); 5551 hns3_nic_uninit_irq(priv); 5552 hns3_clear_all_ring(handle, true); 5553 hns3_reset_tx_queue(priv->ae_handle); 5554 5555 hns3_nic_uninit_vector_data(priv); 5556 5557 hns3_nic_dealloc_vector_data(priv); 5558 5559 hns3_uninit_all_ring(priv); 5560 5561 hns3_put_ring_config(priv); 5562 5563 return 0; 5564 } 5565 5566 int hns3_reset_notify(struct hnae3_handle *handle, 5567 enum hnae3_reset_notify_type type) 5568 { 5569 int ret = 0; 5570 5571 switch (type) { 5572 case HNAE3_UP_CLIENT: 5573 ret = hns3_reset_notify_up_enet(handle); 5574 break; 5575 case HNAE3_DOWN_CLIENT: 5576 ret = hns3_reset_notify_down_enet(handle); 5577 break; 5578 case HNAE3_INIT_CLIENT: 5579 ret = hns3_reset_notify_init_enet(handle); 5580 break; 5581 case HNAE3_UNINIT_CLIENT: 5582 ret = hns3_reset_notify_uninit_enet(handle); 5583 break; 5584 default: 5585 break; 5586 } 5587 5588 return ret; 5589 } 5590 5591 static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num, 5592 bool rxfh_configured) 5593 { 5594 int ret; 5595 5596 ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num, 5597 rxfh_configured); 5598 if (ret) { 5599 dev_err(&handle->pdev->dev, 5600 "Change tqp num(%u) fail.\n", new_tqp_num); 5601 return ret; 5602 } 5603 5604 ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT); 5605 if (ret) 5606 return ret; 5607 5608 ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT); 5609 if (ret) 5610 hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT); 5611 5612 return ret; 5613 } 5614 5615 int hns3_set_channels(struct net_device *netdev, 5616 struct ethtool_channels *ch) 5617 { 5618 struct hnae3_handle *h = hns3_get_handle(netdev); 5619 struct hnae3_knic_private_info *kinfo = &h->kinfo; 5620 bool rxfh_configured = netif_is_rxfh_configured(netdev); 5621 u32 new_tqp_num = ch->combined_count; 5622 u16 org_tqp_num; 5623 int ret; 5624 5625 if (hns3_nic_resetting(netdev)) 5626 return -EBUSY; 5627 5628 if (ch->rx_count || ch->tx_count) 5629 return -EINVAL; 5630 5631 if (kinfo->tc_info.mqprio_active) { 5632 dev_err(&netdev->dev, 5633 "it's not allowed to set channels via ethtool when MQPRIO mode is on\n"); 5634 return -EINVAL; 5635 } 5636 5637 if (new_tqp_num > hns3_get_max_available_channels(h) || 5638 new_tqp_num < 1) { 5639 dev_err(&netdev->dev, 5640 "Change tqps fail, the tqp range is from 1 to %u", 5641 hns3_get_max_available_channels(h)); 5642 return -EINVAL; 5643 } 
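/* nothing to do if the requested channel count already matches the
 * current RSS size
 */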
5644 5645 if (kinfo->rss_size == new_tqp_num) 5646 return 0; 5647 5648 netif_dbg(h, drv, netdev, 5649 "set channels: tqp_num=%u, rxfh=%d\n", 5650 new_tqp_num, rxfh_configured); 5651 5652 ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); 5653 if (ret) 5654 return ret; 5655 5656 ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); 5657 if (ret) 5658 return ret; 5659 5660 org_tqp_num = h->kinfo.num_tqps; 5661 ret = hns3_change_channels(h, new_tqp_num, rxfh_configured); 5662 if (ret) { 5663 int ret1; 5664 5665 netdev_warn(netdev, 5666 "Change channels fail, revert to old value\n"); 5667 ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured); 5668 if (ret1) { 5669 netdev_err(netdev, 5670 "revert to old channel fail\n"); 5671 return ret1; 5672 } 5673 5674 return ret; 5675 } 5676 5677 return 0; 5678 } 5679 5680 static const struct hns3_hw_error_info hns3_hw_err[] = { 5681 { .type = HNAE3_PPU_POISON_ERROR, 5682 .msg = "PPU poison" }, 5683 { .type = HNAE3_CMDQ_ECC_ERROR, 5684 .msg = "IMP CMDQ error" }, 5685 { .type = HNAE3_IMP_RD_POISON_ERROR, 5686 .msg = "IMP RD poison" }, 5687 { .type = HNAE3_ROCEE_AXI_RESP_ERROR, 5688 .msg = "ROCEE AXI RESP error" }, 5689 }; 5690 5691 static void hns3_process_hw_error(struct hnae3_handle *handle, 5692 enum hnae3_hw_error_type type) 5693 { 5694 int i; 5695 5696 for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) { 5697 if (hns3_hw_err[i].type == type) { 5698 dev_err(&handle->pdev->dev, "Detected %s!\n", 5699 hns3_hw_err[i].msg); 5700 break; 5701 } 5702 } 5703 } 5704 5705 static const struct hnae3_client_ops client_ops = { 5706 .init_instance = hns3_client_init, 5707 .uninit_instance = hns3_client_uninit, 5708 .link_status_change = hns3_link_status_change, 5709 .reset_notify = hns3_reset_notify, 5710 .process_hw_error = hns3_process_hw_error, 5711 }; 5712 5713 /* hns3_init_module - Driver registration routine 5714 * hns3_init_module is the first routine called when the driver is 5715 * loaded. All it does is register with the PCI subsystem. 5716 */ 5717 static int __init hns3_init_module(void) 5718 { 5719 int ret; 5720 5721 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); 5722 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); 5723 5724 client.type = HNAE3_CLIENT_KNIC; 5725 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s", 5726 hns3_driver_name); 5727 5728 client.ops = &client_ops; 5729 5730 INIT_LIST_HEAD(&client.node); 5731 5732 hns3_dbg_register_debugfs(hns3_driver_name); 5733 5734 ret = hnae3_register_client(&client); 5735 if (ret) 5736 goto err_reg_client; 5737 5738 ret = pci_register_driver(&hns3_driver); 5739 if (ret) 5740 goto err_reg_driver; 5741 5742 return ret; 5743 5744 err_reg_driver: 5745 hnae3_unregister_client(&client); 5746 err_reg_client: 5747 hns3_dbg_unregister_debugfs(); 5748 return ret; 5749 } 5750 module_init(hns3_init_module); 5751 5752 /* hns3_exit_module - Driver exit cleanup routine 5753 * hns3_exit_module is called just before the driver is removed 5754 * from memory. 5755 */ 5756 static void __exit hns3_exit_module(void) 5757 { 5758 pci_unregister_driver(&hns3_driver); 5759 hnae3_unregister_client(&client); 5760 hns3_dbg_unregister_debugfs(); 5761 } 5762 module_exit(hns3_exit_module); 5763 5764 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); 5765 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 5766 MODULE_LICENSE("GPL"); 5767 MODULE_ALIAS("pci:hns-nic"); 5768