1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/dma-mapping.h> 5 #include <linux/etherdevice.h> 6 #include <linux/interrupt.h> 7 #ifdef CONFIG_RFS_ACCEL 8 #include <linux/cpu_rmap.h> 9 #endif 10 #include <linux/if_vlan.h> 11 #include <linux/irq.h> 12 #include <linux/ip.h> 13 #include <linux/ipv6.h> 14 #include <linux/module.h> 15 #include <linux/pci.h> 16 #include <linux/aer.h> 17 #include <linux/skbuff.h> 18 #include <linux/sctp.h> 19 #include <net/gre.h> 20 #include <net/gro.h> 21 #include <net/ip6_checksum.h> 22 #include <net/pkt_cls.h> 23 #include <net/tcp.h> 24 #include <net/vxlan.h> 25 #include <net/geneve.h> 26 27 #include "hnae3.h" 28 #include "hns3_enet.h" 29 /* All hns3 tracepoints are defined by the include below, which 30 * must be included exactly once across the whole kernel with 31 * CREATE_TRACE_POINTS defined 32 */ 33 #define CREATE_TRACE_POINTS 34 #include "hns3_trace.h" 35 36 #define hns3_set_field(origin, shift, val) ((origin) |= (val) << (shift)) 37 #define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) 38 39 #define hns3_rl_err(fmt, ...) \ 40 do { \ 41 if (net_ratelimit()) \ 42 netdev_err(fmt, ##__VA_ARGS__); \ 43 } while (0) 44 45 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force); 46 47 static const char hns3_driver_name[] = "hns3"; 48 static const char hns3_driver_string[] = 49 "Hisilicon Ethernet Network Driver for Hip08 Family"; 50 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; 51 static struct hnae3_client client; 52 53 static int debug = -1; 54 module_param(debug, int, 0); 55 MODULE_PARM_DESC(debug, " Network interface message level setting"); 56 57 static unsigned int tx_sgl = 1; 58 module_param(tx_sgl, uint, 0600); 59 MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping"); 60 61 static bool page_pool_enabled = true; 62 module_param(page_pool_enabled, bool, 0400); 63 64 #define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \ 65 sizeof(struct sg_table)) 66 #define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \ 67 dma_get_cache_alignment()) 68 69 #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ 70 NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) 71 72 #define HNS3_INNER_VLAN_TAG 1 73 #define HNS3_OUTER_VLAN_TAG 2 74 75 #define HNS3_MIN_TX_LEN 33U 76 #define HNS3_MIN_TUN_PKT_LEN 65U 77 78 /* hns3_pci_tbl - PCI Device ID Table 79 * 80 * Last entry must be all 0s 81 * 82 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 83 * Class, Class Mask, private data (not used) } 84 */ 85 static const struct pci_device_id hns3_pci_tbl[] = { 86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 87 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 88 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 89 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 90 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 91 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 92 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 93 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 94 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 95 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 96 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 97 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 98 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 99 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 100 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, 101 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), 102 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 103 /* required last entry */ 104 {0,} 105 }; 106 MODULE_DEVICE_TABLE(pci, 
hns3_pci_tbl); 107 108 #define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t) \ 109 { ptype, \ 110 l, \ 111 CHECKSUM_##s, \ 112 HNS3_L3_TYPE_##t, \ 113 1 } 114 115 #define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \ 116 { ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0 } 117 118 static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = { 119 HNS3_RX_PTYPE_UNUSED_ENTRY(0), 120 HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP), 121 HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP), 122 HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP), 123 HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL), 124 HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL), 125 HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL), 126 HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM), 127 HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL), 128 HNS3_RX_PTYPE_UNUSED_ENTRY(9), 129 HNS3_RX_PTYPE_UNUSED_ENTRY(10), 130 HNS3_RX_PTYPE_UNUSED_ENTRY(11), 131 HNS3_RX_PTYPE_UNUSED_ENTRY(12), 132 HNS3_RX_PTYPE_UNUSED_ENTRY(13), 133 HNS3_RX_PTYPE_UNUSED_ENTRY(14), 134 HNS3_RX_PTYPE_UNUSED_ENTRY(15), 135 HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL), 136 HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4), 137 HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4), 138 HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4), 139 HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4), 140 HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4), 141 HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4), 142 HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4), 143 HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4), 144 HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4), 145 HNS3_RX_PTYPE_UNUSED_ENTRY(26), 146 HNS3_RX_PTYPE_UNUSED_ENTRY(27), 147 HNS3_RX_PTYPE_UNUSED_ENTRY(28), 148 HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL), 149 HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL), 150 HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4), 151 HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4), 152 HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4), 153 HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4), 154 HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4), 155 HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4), 156 HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4), 157 HNS3_RX_PTYPE_UNUSED_ENTRY(38), 158 HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6), 159 HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6), 160 HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6), 161 HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6), 162 HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6), 163 HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6), 164 HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6), 165 HNS3_RX_PTYPE_UNUSED_ENTRY(46), 166 HNS3_RX_PTYPE_UNUSED_ENTRY(47), 167 HNS3_RX_PTYPE_UNUSED_ENTRY(48), 168 HNS3_RX_PTYPE_UNUSED_ENTRY(49), 169 HNS3_RX_PTYPE_UNUSED_ENTRY(50), 170 HNS3_RX_PTYPE_UNUSED_ENTRY(51), 171 HNS3_RX_PTYPE_UNUSED_ENTRY(52), 172 HNS3_RX_PTYPE_UNUSED_ENTRY(53), 173 HNS3_RX_PTYPE_UNUSED_ENTRY(54), 174 HNS3_RX_PTYPE_UNUSED_ENTRY(55), 175 HNS3_RX_PTYPE_UNUSED_ENTRY(56), 176 HNS3_RX_PTYPE_UNUSED_ENTRY(57), 177 HNS3_RX_PTYPE_UNUSED_ENTRY(58), 178 HNS3_RX_PTYPE_UNUSED_ENTRY(59), 179 HNS3_RX_PTYPE_UNUSED_ENTRY(60), 180 HNS3_RX_PTYPE_UNUSED_ENTRY(61), 181 HNS3_RX_PTYPE_UNUSED_ENTRY(62), 182 HNS3_RX_PTYPE_UNUSED_ENTRY(63), 183 HNS3_RX_PTYPE_UNUSED_ENTRY(64), 184 HNS3_RX_PTYPE_UNUSED_ENTRY(65), 185 HNS3_RX_PTYPE_UNUSED_ENTRY(66), 186 HNS3_RX_PTYPE_UNUSED_ENTRY(67), 187 HNS3_RX_PTYPE_UNUSED_ENTRY(68), 188 HNS3_RX_PTYPE_UNUSED_ENTRY(69), 189 HNS3_RX_PTYPE_UNUSED_ENTRY(70), 190 HNS3_RX_PTYPE_UNUSED_ENTRY(71), 191 HNS3_RX_PTYPE_UNUSED_ENTRY(72), 192 HNS3_RX_PTYPE_UNUSED_ENTRY(73), 193 HNS3_RX_PTYPE_UNUSED_ENTRY(74), 194 HNS3_RX_PTYPE_UNUSED_ENTRY(75), 195 HNS3_RX_PTYPE_UNUSED_ENTRY(76), 196 
HNS3_RX_PTYPE_UNUSED_ENTRY(77), 197 HNS3_RX_PTYPE_UNUSED_ENTRY(78), 198 HNS3_RX_PTYPE_UNUSED_ENTRY(79), 199 HNS3_RX_PTYPE_UNUSED_ENTRY(80), 200 HNS3_RX_PTYPE_UNUSED_ENTRY(81), 201 HNS3_RX_PTYPE_UNUSED_ENTRY(82), 202 HNS3_RX_PTYPE_UNUSED_ENTRY(83), 203 HNS3_RX_PTYPE_UNUSED_ENTRY(84), 204 HNS3_RX_PTYPE_UNUSED_ENTRY(85), 205 HNS3_RX_PTYPE_UNUSED_ENTRY(86), 206 HNS3_RX_PTYPE_UNUSED_ENTRY(87), 207 HNS3_RX_PTYPE_UNUSED_ENTRY(88), 208 HNS3_RX_PTYPE_UNUSED_ENTRY(89), 209 HNS3_RX_PTYPE_UNUSED_ENTRY(90), 210 HNS3_RX_PTYPE_UNUSED_ENTRY(91), 211 HNS3_RX_PTYPE_UNUSED_ENTRY(92), 212 HNS3_RX_PTYPE_UNUSED_ENTRY(93), 213 HNS3_RX_PTYPE_UNUSED_ENTRY(94), 214 HNS3_RX_PTYPE_UNUSED_ENTRY(95), 215 HNS3_RX_PTYPE_UNUSED_ENTRY(96), 216 HNS3_RX_PTYPE_UNUSED_ENTRY(97), 217 HNS3_RX_PTYPE_UNUSED_ENTRY(98), 218 HNS3_RX_PTYPE_UNUSED_ENTRY(99), 219 HNS3_RX_PTYPE_UNUSED_ENTRY(100), 220 HNS3_RX_PTYPE_UNUSED_ENTRY(101), 221 HNS3_RX_PTYPE_UNUSED_ENTRY(102), 222 HNS3_RX_PTYPE_UNUSED_ENTRY(103), 223 HNS3_RX_PTYPE_UNUSED_ENTRY(104), 224 HNS3_RX_PTYPE_UNUSED_ENTRY(105), 225 HNS3_RX_PTYPE_UNUSED_ENTRY(106), 226 HNS3_RX_PTYPE_UNUSED_ENTRY(107), 227 HNS3_RX_PTYPE_UNUSED_ENTRY(108), 228 HNS3_RX_PTYPE_UNUSED_ENTRY(109), 229 HNS3_RX_PTYPE_UNUSED_ENTRY(110), 230 HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6), 231 HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6), 232 HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6), 233 HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6), 234 HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6), 235 HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6), 236 HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6), 237 HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6), 238 HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6), 239 HNS3_RX_PTYPE_UNUSED_ENTRY(120), 240 HNS3_RX_PTYPE_UNUSED_ENTRY(121), 241 HNS3_RX_PTYPE_UNUSED_ENTRY(122), 242 HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL), 243 HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL), 244 HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4), 245 HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4), 246 HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4), 247 HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4), 248 HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4), 249 HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4), 250 HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4), 251 HNS3_RX_PTYPE_UNUSED_ENTRY(132), 252 HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6), 253 HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6), 254 HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6), 255 HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6), 256 HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6), 257 HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6), 258 HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6), 259 HNS3_RX_PTYPE_UNUSED_ENTRY(140), 260 HNS3_RX_PTYPE_UNUSED_ENTRY(141), 261 HNS3_RX_PTYPE_UNUSED_ENTRY(142), 262 HNS3_RX_PTYPE_UNUSED_ENTRY(143), 263 HNS3_RX_PTYPE_UNUSED_ENTRY(144), 264 HNS3_RX_PTYPE_UNUSED_ENTRY(145), 265 HNS3_RX_PTYPE_UNUSED_ENTRY(146), 266 HNS3_RX_PTYPE_UNUSED_ENTRY(147), 267 HNS3_RX_PTYPE_UNUSED_ENTRY(148), 268 HNS3_RX_PTYPE_UNUSED_ENTRY(149), 269 HNS3_RX_PTYPE_UNUSED_ENTRY(150), 270 HNS3_RX_PTYPE_UNUSED_ENTRY(151), 271 HNS3_RX_PTYPE_UNUSED_ENTRY(152), 272 HNS3_RX_PTYPE_UNUSED_ENTRY(153), 273 HNS3_RX_PTYPE_UNUSED_ENTRY(154), 274 HNS3_RX_PTYPE_UNUSED_ENTRY(155), 275 HNS3_RX_PTYPE_UNUSED_ENTRY(156), 276 HNS3_RX_PTYPE_UNUSED_ENTRY(157), 277 HNS3_RX_PTYPE_UNUSED_ENTRY(158), 278 HNS3_RX_PTYPE_UNUSED_ENTRY(159), 279 HNS3_RX_PTYPE_UNUSED_ENTRY(160), 280 HNS3_RX_PTYPE_UNUSED_ENTRY(161), 281 HNS3_RX_PTYPE_UNUSED_ENTRY(162), 282 HNS3_RX_PTYPE_UNUSED_ENTRY(163), 283 
HNS3_RX_PTYPE_UNUSED_ENTRY(164), 284 HNS3_RX_PTYPE_UNUSED_ENTRY(165), 285 HNS3_RX_PTYPE_UNUSED_ENTRY(166), 286 HNS3_RX_PTYPE_UNUSED_ENTRY(167), 287 HNS3_RX_PTYPE_UNUSED_ENTRY(168), 288 HNS3_RX_PTYPE_UNUSED_ENTRY(169), 289 HNS3_RX_PTYPE_UNUSED_ENTRY(170), 290 HNS3_RX_PTYPE_UNUSED_ENTRY(171), 291 HNS3_RX_PTYPE_UNUSED_ENTRY(172), 292 HNS3_RX_PTYPE_UNUSED_ENTRY(173), 293 HNS3_RX_PTYPE_UNUSED_ENTRY(174), 294 HNS3_RX_PTYPE_UNUSED_ENTRY(175), 295 HNS3_RX_PTYPE_UNUSED_ENTRY(176), 296 HNS3_RX_PTYPE_UNUSED_ENTRY(177), 297 HNS3_RX_PTYPE_UNUSED_ENTRY(178), 298 HNS3_RX_PTYPE_UNUSED_ENTRY(179), 299 HNS3_RX_PTYPE_UNUSED_ENTRY(180), 300 HNS3_RX_PTYPE_UNUSED_ENTRY(181), 301 HNS3_RX_PTYPE_UNUSED_ENTRY(182), 302 HNS3_RX_PTYPE_UNUSED_ENTRY(183), 303 HNS3_RX_PTYPE_UNUSED_ENTRY(184), 304 HNS3_RX_PTYPE_UNUSED_ENTRY(185), 305 HNS3_RX_PTYPE_UNUSED_ENTRY(186), 306 HNS3_RX_PTYPE_UNUSED_ENTRY(187), 307 HNS3_RX_PTYPE_UNUSED_ENTRY(188), 308 HNS3_RX_PTYPE_UNUSED_ENTRY(189), 309 HNS3_RX_PTYPE_UNUSED_ENTRY(190), 310 HNS3_RX_PTYPE_UNUSED_ENTRY(191), 311 HNS3_RX_PTYPE_UNUSED_ENTRY(192), 312 HNS3_RX_PTYPE_UNUSED_ENTRY(193), 313 HNS3_RX_PTYPE_UNUSED_ENTRY(194), 314 HNS3_RX_PTYPE_UNUSED_ENTRY(195), 315 HNS3_RX_PTYPE_UNUSED_ENTRY(196), 316 HNS3_RX_PTYPE_UNUSED_ENTRY(197), 317 HNS3_RX_PTYPE_UNUSED_ENTRY(198), 318 HNS3_RX_PTYPE_UNUSED_ENTRY(199), 319 HNS3_RX_PTYPE_UNUSED_ENTRY(200), 320 HNS3_RX_PTYPE_UNUSED_ENTRY(201), 321 HNS3_RX_PTYPE_UNUSED_ENTRY(202), 322 HNS3_RX_PTYPE_UNUSED_ENTRY(203), 323 HNS3_RX_PTYPE_UNUSED_ENTRY(204), 324 HNS3_RX_PTYPE_UNUSED_ENTRY(205), 325 HNS3_RX_PTYPE_UNUSED_ENTRY(206), 326 HNS3_RX_PTYPE_UNUSED_ENTRY(207), 327 HNS3_RX_PTYPE_UNUSED_ENTRY(208), 328 HNS3_RX_PTYPE_UNUSED_ENTRY(209), 329 HNS3_RX_PTYPE_UNUSED_ENTRY(210), 330 HNS3_RX_PTYPE_UNUSED_ENTRY(211), 331 HNS3_RX_PTYPE_UNUSED_ENTRY(212), 332 HNS3_RX_PTYPE_UNUSED_ENTRY(213), 333 HNS3_RX_PTYPE_UNUSED_ENTRY(214), 334 HNS3_RX_PTYPE_UNUSED_ENTRY(215), 335 HNS3_RX_PTYPE_UNUSED_ENTRY(216), 336 HNS3_RX_PTYPE_UNUSED_ENTRY(217), 337 HNS3_RX_PTYPE_UNUSED_ENTRY(218), 338 HNS3_RX_PTYPE_UNUSED_ENTRY(219), 339 HNS3_RX_PTYPE_UNUSED_ENTRY(220), 340 HNS3_RX_PTYPE_UNUSED_ENTRY(221), 341 HNS3_RX_PTYPE_UNUSED_ENTRY(222), 342 HNS3_RX_PTYPE_UNUSED_ENTRY(223), 343 HNS3_RX_PTYPE_UNUSED_ENTRY(224), 344 HNS3_RX_PTYPE_UNUSED_ENTRY(225), 345 HNS3_RX_PTYPE_UNUSED_ENTRY(226), 346 HNS3_RX_PTYPE_UNUSED_ENTRY(227), 347 HNS3_RX_PTYPE_UNUSED_ENTRY(228), 348 HNS3_RX_PTYPE_UNUSED_ENTRY(229), 349 HNS3_RX_PTYPE_UNUSED_ENTRY(230), 350 HNS3_RX_PTYPE_UNUSED_ENTRY(231), 351 HNS3_RX_PTYPE_UNUSED_ENTRY(232), 352 HNS3_RX_PTYPE_UNUSED_ENTRY(233), 353 HNS3_RX_PTYPE_UNUSED_ENTRY(234), 354 HNS3_RX_PTYPE_UNUSED_ENTRY(235), 355 HNS3_RX_PTYPE_UNUSED_ENTRY(236), 356 HNS3_RX_PTYPE_UNUSED_ENTRY(237), 357 HNS3_RX_PTYPE_UNUSED_ENTRY(238), 358 HNS3_RX_PTYPE_UNUSED_ENTRY(239), 359 HNS3_RX_PTYPE_UNUSED_ENTRY(240), 360 HNS3_RX_PTYPE_UNUSED_ENTRY(241), 361 HNS3_RX_PTYPE_UNUSED_ENTRY(242), 362 HNS3_RX_PTYPE_UNUSED_ENTRY(243), 363 HNS3_RX_PTYPE_UNUSED_ENTRY(244), 364 HNS3_RX_PTYPE_UNUSED_ENTRY(245), 365 HNS3_RX_PTYPE_UNUSED_ENTRY(246), 366 HNS3_RX_PTYPE_UNUSED_ENTRY(247), 367 HNS3_RX_PTYPE_UNUSED_ENTRY(248), 368 HNS3_RX_PTYPE_UNUSED_ENTRY(249), 369 HNS3_RX_PTYPE_UNUSED_ENTRY(250), 370 HNS3_RX_PTYPE_UNUSED_ENTRY(251), 371 HNS3_RX_PTYPE_UNUSED_ENTRY(252), 372 HNS3_RX_PTYPE_UNUSED_ENTRY(253), 373 HNS3_RX_PTYPE_UNUSED_ENTRY(254), 374 HNS3_RX_PTYPE_UNUSED_ENTRY(255), 375 }; 376 377 #define HNS3_INVALID_PTYPE \ 378 ARRAY_SIZE(hns3_rx_ptype_tbl) 379 380 static irqreturn_t hns3_irq_handle(int irq, void *vector) 381 { 382 
struct hns3_enet_tqp_vector *tqp_vector = vector; 383 384 napi_schedule_irqoff(&tqp_vector->napi); 385 tqp_vector->event_cnt++; 386 387 return IRQ_HANDLED; 388 } 389 390 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) 391 { 392 struct hns3_enet_tqp_vector *tqp_vectors; 393 unsigned int i; 394 395 for (i = 0; i < priv->vector_num; i++) { 396 tqp_vectors = &priv->tqp_vector[i]; 397 398 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) 399 continue; 400 401 /* clear the affinity mask */ 402 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL); 403 404 /* release the irq resource */ 405 free_irq(tqp_vectors->vector_irq, tqp_vectors); 406 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; 407 } 408 } 409 410 static int hns3_nic_init_irq(struct hns3_nic_priv *priv) 411 { 412 struct hns3_enet_tqp_vector *tqp_vectors; 413 int txrx_int_idx = 0; 414 int rx_int_idx = 0; 415 int tx_int_idx = 0; 416 unsigned int i; 417 int ret; 418 419 for (i = 0; i < priv->vector_num; i++) { 420 tqp_vectors = &priv->tqp_vector[i]; 421 422 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) 423 continue; 424 425 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { 426 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, 427 "%s-%s-%s-%d", hns3_driver_name, 428 pci_name(priv->ae_handle->pdev), 429 "TxRx", txrx_int_idx++); 430 txrx_int_idx++; 431 } else if (tqp_vectors->rx_group.ring) { 432 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, 433 "%s-%s-%s-%d", hns3_driver_name, 434 pci_name(priv->ae_handle->pdev), 435 "Rx", rx_int_idx++); 436 } else if (tqp_vectors->tx_group.ring) { 437 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, 438 "%s-%s-%s-%d", hns3_driver_name, 439 pci_name(priv->ae_handle->pdev), 440 "Tx", tx_int_idx++); 441 } else { 442 /* Skip this unused q_vector */ 443 continue; 444 } 445 446 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; 447 448 irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN); 449 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, 450 tqp_vectors->name, tqp_vectors); 451 if (ret) { 452 netdev_err(priv->netdev, "request irq(%d) fail\n", 453 tqp_vectors->vector_irq); 454 hns3_nic_uninit_irq(priv); 455 return ret; 456 } 457 458 irq_set_affinity_hint(tqp_vectors->vector_irq, 459 &tqp_vectors->affinity_mask); 460 461 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; 462 } 463 464 return 0; 465 } 466 467 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, 468 u32 mask_en) 469 { 470 writel(mask_en, tqp_vector->mask_addr); 471 } 472 473 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) 474 { 475 napi_enable(&tqp_vector->napi); 476 enable_irq(tqp_vector->vector_irq); 477 478 /* enable vector */ 479 hns3_mask_vector_irq(tqp_vector, 1); 480 } 481 482 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) 483 { 484 /* disable vector */ 485 hns3_mask_vector_irq(tqp_vector, 0); 486 487 disable_irq(tqp_vector->vector_irq); 488 napi_disable(&tqp_vector->napi); 489 cancel_work_sync(&tqp_vector->rx_group.dim.work); 490 cancel_work_sync(&tqp_vector->tx_group.dim.work); 491 } 492 493 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, 494 u32 rl_value) 495 { 496 u32 rl_reg = hns3_rl_usec_to_reg(rl_value); 497 498 /* this defines the configuration for RL (Interrupt Rate Limiter). 499 * Rl defines rate of interrupts i.e. 
number of interrupts-per-second 500 * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing 501 */ 502 if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable && 503 !tqp_vector->rx_group.coal.adapt_enable) 504 /* According to the hardware, the range of rl_reg is 505 * 0-59 and the unit is 4. 506 */ 507 rl_reg |= HNS3_INT_RL_ENABLE_MASK; 508 509 writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); 510 } 511 512 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector, 513 u32 gl_value) 514 { 515 u32 new_val; 516 517 if (tqp_vector->rx_group.coal.unit_1us) 518 new_val = gl_value | HNS3_INT_GL_1US; 519 else 520 new_val = hns3_gl_usec_to_reg(gl_value); 521 522 writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); 523 } 524 525 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, 526 u32 gl_value) 527 { 528 u32 new_val; 529 530 if (tqp_vector->tx_group.coal.unit_1us) 531 new_val = gl_value | HNS3_INT_GL_1US; 532 else 533 new_val = hns3_gl_usec_to_reg(gl_value); 534 535 writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); 536 } 537 538 void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector, 539 u32 ql_value) 540 { 541 writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET); 542 } 543 544 void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector, 545 u32 ql_value) 546 { 547 writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET); 548 } 549 550 static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector, 551 struct hns3_nic_priv *priv) 552 { 553 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); 554 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; 555 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; 556 struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal; 557 struct hns3_enet_coalesce *prx_coal = &priv->rx_coal; 558 559 tx_coal->adapt_enable = ptx_coal->adapt_enable; 560 rx_coal->adapt_enable = prx_coal->adapt_enable; 561 562 tx_coal->int_gl = ptx_coal->int_gl; 563 rx_coal->int_gl = prx_coal->int_gl; 564 565 rx_coal->flow_level = prx_coal->flow_level; 566 tx_coal->flow_level = ptx_coal->flow_level; 567 568 /* device version above V3(include V3), GL can configure 1us 569 * unit, so uses 1us unit. 
570 */ 571 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { 572 tx_coal->unit_1us = 1; 573 rx_coal->unit_1us = 1; 574 } 575 576 if (ae_dev->dev_specs.int_ql_max) { 577 tx_coal->ql_enable = 1; 578 rx_coal->ql_enable = 1; 579 tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; 580 rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; 581 tx_coal->int_ql = ptx_coal->int_ql; 582 rx_coal->int_ql = prx_coal->int_ql; 583 } 584 } 585 586 static void 587 hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector, 588 struct hns3_nic_priv *priv) 589 { 590 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; 591 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; 592 struct hnae3_handle *h = priv->ae_handle; 593 594 hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl); 595 hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl); 596 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); 597 598 if (tx_coal->ql_enable) 599 hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql); 600 601 if (rx_coal->ql_enable) 602 hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql); 603 } 604 605 static int hns3_nic_set_real_num_queue(struct net_device *netdev) 606 { 607 struct hnae3_handle *h = hns3_get_handle(netdev); 608 struct hnae3_knic_private_info *kinfo = &h->kinfo; 609 struct hnae3_tc_info *tc_info = &kinfo->tc_info; 610 unsigned int queue_size = kinfo->num_tqps; 611 int i, ret; 612 613 if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) { 614 netdev_reset_tc(netdev); 615 } else { 616 ret = netdev_set_num_tc(netdev, tc_info->num_tc); 617 if (ret) { 618 netdev_err(netdev, 619 "netdev_set_num_tc fail, ret=%d!\n", ret); 620 return ret; 621 } 622 623 for (i = 0; i < tc_info->num_tc; i++) 624 netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i], 625 tc_info->tqp_offset[i]); 626 } 627 628 ret = netif_set_real_num_tx_queues(netdev, queue_size); 629 if (ret) { 630 netdev_err(netdev, 631 "netif_set_real_num_tx_queues fail, ret=%d!\n", ret); 632 return ret; 633 } 634 635 ret = netif_set_real_num_rx_queues(netdev, queue_size); 636 if (ret) { 637 netdev_err(netdev, 638 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); 639 return ret; 640 } 641 642 return 0; 643 } 644 645 u16 hns3_get_max_available_channels(struct hnae3_handle *h) 646 { 647 u16 alloc_tqps, max_rss_size, rss_size; 648 649 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); 650 rss_size = alloc_tqps / h->kinfo.tc_info.num_tc; 651 652 return min_t(u16, rss_size, max_rss_size); 653 } 654 655 static void hns3_tqp_enable(struct hnae3_queue *tqp) 656 { 657 u32 rcb_reg; 658 659 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); 660 rcb_reg |= BIT(HNS3_RING_EN_B); 661 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); 662 } 663 664 static void hns3_tqp_disable(struct hnae3_queue *tqp) 665 { 666 u32 rcb_reg; 667 668 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); 669 rcb_reg &= ~BIT(HNS3_RING_EN_B); 670 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); 671 } 672 673 static void hns3_free_rx_cpu_rmap(struct net_device *netdev) 674 { 675 #ifdef CONFIG_RFS_ACCEL 676 free_irq_cpu_rmap(netdev->rx_cpu_rmap); 677 netdev->rx_cpu_rmap = NULL; 678 #endif 679 } 680 681 static int hns3_set_rx_cpu_rmap(struct net_device *netdev) 682 { 683 #ifdef CONFIG_RFS_ACCEL 684 struct hns3_nic_priv *priv = netdev_priv(netdev); 685 struct hns3_enet_tqp_vector *tqp_vector; 686 int i, ret; 687 688 if (!netdev->rx_cpu_rmap) { 689 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num); 690 if 
(!netdev->rx_cpu_rmap) 691 return -ENOMEM; 692 } 693 694 for (i = 0; i < priv->vector_num; i++) { 695 tqp_vector = &priv->tqp_vector[i]; 696 ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap, 697 tqp_vector->vector_irq); 698 if (ret) { 699 hns3_free_rx_cpu_rmap(netdev); 700 return ret; 701 } 702 } 703 #endif 704 return 0; 705 } 706 707 static int hns3_nic_net_up(struct net_device *netdev) 708 { 709 struct hns3_nic_priv *priv = netdev_priv(netdev); 710 struct hnae3_handle *h = priv->ae_handle; 711 int i, j; 712 int ret; 713 714 ret = hns3_nic_reset_all_ring(h); 715 if (ret) 716 return ret; 717 718 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); 719 720 /* enable the vectors */ 721 for (i = 0; i < priv->vector_num; i++) 722 hns3_vector_enable(&priv->tqp_vector[i]); 723 724 /* enable rcb */ 725 for (j = 0; j < h->kinfo.num_tqps; j++) 726 hns3_tqp_enable(h->kinfo.tqp[j]); 727 728 /* start the ae_dev */ 729 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; 730 if (ret) { 731 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); 732 while (j--) 733 hns3_tqp_disable(h->kinfo.tqp[j]); 734 735 for (j = i - 1; j >= 0; j--) 736 hns3_vector_disable(&priv->tqp_vector[j]); 737 } 738 739 return ret; 740 } 741 742 static void hns3_config_xps(struct hns3_nic_priv *priv) 743 { 744 int i; 745 746 for (i = 0; i < priv->vector_num; i++) { 747 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; 748 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; 749 750 while (ring) { 751 int ret; 752 753 ret = netif_set_xps_queue(priv->netdev, 754 &tqp_vector->affinity_mask, 755 ring->tqp->tqp_index); 756 if (ret) 757 netdev_warn(priv->netdev, 758 "set xps queue failed: %d", ret); 759 760 ring = ring->next; 761 } 762 } 763 } 764 765 static int hns3_nic_net_open(struct net_device *netdev) 766 { 767 struct hns3_nic_priv *priv = netdev_priv(netdev); 768 struct hnae3_handle *h = hns3_get_handle(netdev); 769 struct hnae3_knic_private_info *kinfo; 770 int i, ret; 771 772 if (hns3_nic_resetting(netdev)) 773 return -EBUSY; 774 775 if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 776 netdev_warn(netdev, "net open repeatedly!\n"); 777 return 0; 778 } 779 780 netif_carrier_off(netdev); 781 782 ret = hns3_nic_set_real_num_queue(netdev); 783 if (ret) 784 return ret; 785 786 ret = hns3_nic_net_up(netdev); 787 if (ret) { 788 netdev_err(netdev, "net up fail, ret=%d!\n", ret); 789 return ret; 790 } 791 792 kinfo = &h->kinfo; 793 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) 794 netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]); 795 796 if (h->ae_algo->ops->set_timer_task) 797 h->ae_algo->ops->set_timer_task(priv->ae_handle, true); 798 799 hns3_config_xps(priv); 800 801 netif_dbg(h, drv, netdev, "net open\n"); 802 803 return 0; 804 } 805 806 static void hns3_reset_tx_queue(struct hnae3_handle *h) 807 { 808 struct net_device *ndev = h->kinfo.netdev; 809 struct hns3_nic_priv *priv = netdev_priv(ndev); 810 struct netdev_queue *dev_queue; 811 u32 i; 812 813 for (i = 0; i < h->kinfo.num_tqps; i++) { 814 dev_queue = netdev_get_tx_queue(ndev, 815 priv->ring[i].queue_index); 816 netdev_tx_reset_queue(dev_queue); 817 } 818 } 819 820 static void hns3_nic_net_down(struct net_device *netdev) 821 { 822 struct hns3_nic_priv *priv = netdev_priv(netdev); 823 struct hnae3_handle *h = hns3_get_handle(netdev); 824 const struct hnae3_ae_ops *ops; 825 int i; 826 827 /* disable vectors */ 828 for (i = 0; i < priv->vector_num; i++) 829 hns3_vector_disable(&priv->tqp_vector[i]); 830 831 /* disable rcb */ 832 for (i = 0; i < h->kinfo.num_tqps; 
i++) 833 hns3_tqp_disable(h->kinfo.tqp[i]); 834 835 /* stop ae_dev */ 836 ops = priv->ae_handle->ae_algo->ops; 837 if (ops->stop) 838 ops->stop(priv->ae_handle); 839 840 /* delay ring buffer clearing to hns3_reset_notify_uninit_enet 841 * during reset process, because driver may not be able 842 * to disable the ring through firmware when downing the netdev. 843 */ 844 if (!hns3_nic_resetting(netdev)) 845 hns3_clear_all_ring(priv->ae_handle, false); 846 847 hns3_reset_tx_queue(priv->ae_handle); 848 } 849 850 static int hns3_nic_net_stop(struct net_device *netdev) 851 { 852 struct hns3_nic_priv *priv = netdev_priv(netdev); 853 struct hnae3_handle *h = hns3_get_handle(netdev); 854 855 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 856 return 0; 857 858 netif_dbg(h, drv, netdev, "net stop\n"); 859 860 if (h->ae_algo->ops->set_timer_task) 861 h->ae_algo->ops->set_timer_task(priv->ae_handle, false); 862 863 netif_carrier_off(netdev); 864 netif_tx_disable(netdev); 865 866 hns3_nic_net_down(netdev); 867 868 return 0; 869 } 870 871 static int hns3_nic_uc_sync(struct net_device *netdev, 872 const unsigned char *addr) 873 { 874 struct hnae3_handle *h = hns3_get_handle(netdev); 875 876 if (h->ae_algo->ops->add_uc_addr) 877 return h->ae_algo->ops->add_uc_addr(h, addr); 878 879 return 0; 880 } 881 882 static int hns3_nic_uc_unsync(struct net_device *netdev, 883 const unsigned char *addr) 884 { 885 struct hnae3_handle *h = hns3_get_handle(netdev); 886 887 /* need ignore the request of removing device address, because 888 * we store the device address and other addresses of uc list 889 * in the function's mac filter list. 890 */ 891 if (ether_addr_equal(addr, netdev->dev_addr)) 892 return 0; 893 894 if (h->ae_algo->ops->rm_uc_addr) 895 return h->ae_algo->ops->rm_uc_addr(h, addr); 896 897 return 0; 898 } 899 900 static int hns3_nic_mc_sync(struct net_device *netdev, 901 const unsigned char *addr) 902 { 903 struct hnae3_handle *h = hns3_get_handle(netdev); 904 905 if (h->ae_algo->ops->add_mc_addr) 906 return h->ae_algo->ops->add_mc_addr(h, addr); 907 908 return 0; 909 } 910 911 static int hns3_nic_mc_unsync(struct net_device *netdev, 912 const unsigned char *addr) 913 { 914 struct hnae3_handle *h = hns3_get_handle(netdev); 915 916 if (h->ae_algo->ops->rm_mc_addr) 917 return h->ae_algo->ops->rm_mc_addr(h, addr); 918 919 return 0; 920 } 921 922 static u8 hns3_get_netdev_flags(struct net_device *netdev) 923 { 924 u8 flags = 0; 925 926 if (netdev->flags & IFF_PROMISC) 927 flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE; 928 else if (netdev->flags & IFF_ALLMULTI) 929 flags = HNAE3_USER_MPE; 930 931 return flags; 932 } 933 934 static void hns3_nic_set_rx_mode(struct net_device *netdev) 935 { 936 struct hnae3_handle *h = hns3_get_handle(netdev); 937 u8 new_flags; 938 939 new_flags = hns3_get_netdev_flags(netdev); 940 941 __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); 942 __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync); 943 944 /* User mode Promisc mode enable and vlan filtering is disabled to 945 * let all packets in. 
946 */ 947 h->netdev_flags = new_flags; 948 hns3_request_update_promisc_mode(h); 949 } 950 951 void hns3_request_update_promisc_mode(struct hnae3_handle *handle) 952 { 953 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; 954 955 if (ops->request_update_promisc_mode) 956 ops->request_update_promisc_mode(handle); 957 } 958 959 static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring) 960 { 961 struct hns3_tx_spare *tx_spare = ring->tx_spare; 962 u32 ntc, ntu; 963 964 /* This smp_load_acquire() pairs with smp_store_release() in 965 * hns3_tx_spare_update() called in tx desc cleaning process. 966 */ 967 ntc = smp_load_acquire(&tx_spare->last_to_clean); 968 ntu = tx_spare->next_to_use; 969 970 if (ntc > ntu) 971 return ntc - ntu - 1; 972 973 /* The free tx buffer is divided into two part, so pick the 974 * larger one. 975 */ 976 return max(ntc, tx_spare->len - ntu) - 1; 977 } 978 979 static void hns3_tx_spare_update(struct hns3_enet_ring *ring) 980 { 981 struct hns3_tx_spare *tx_spare = ring->tx_spare; 982 983 if (!tx_spare || 984 tx_spare->last_to_clean == tx_spare->next_to_clean) 985 return; 986 987 /* This smp_store_release() pairs with smp_load_acquire() in 988 * hns3_tx_spare_space() called in xmit process. 989 */ 990 smp_store_release(&tx_spare->last_to_clean, 991 tx_spare->next_to_clean); 992 } 993 994 static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring, 995 struct sk_buff *skb, 996 u32 space) 997 { 998 u32 len = skb->len <= ring->tx_copybreak ? skb->len : 999 skb_headlen(skb); 1000 1001 if (len > ring->tx_copybreak) 1002 return false; 1003 1004 if (ALIGN(len, dma_get_cache_alignment()) > space) { 1005 hns3_ring_stats_update(ring, tx_spare_full); 1006 return false; 1007 } 1008 1009 return true; 1010 } 1011 1012 static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring, 1013 struct sk_buff *skb, 1014 u32 space) 1015 { 1016 if (skb->len <= ring->tx_copybreak || !tx_sgl || 1017 (!skb_has_frag_list(skb) && 1018 skb_shinfo(skb)->nr_frags < tx_sgl)) 1019 return false; 1020 1021 if (space < HNS3_MAX_SGL_SIZE) { 1022 hns3_ring_stats_update(ring, tx_spare_full); 1023 return false; 1024 } 1025 1026 return true; 1027 } 1028 1029 static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) 1030 { 1031 struct hns3_tx_spare *tx_spare; 1032 struct page *page; 1033 u32 alloc_size; 1034 dma_addr_t dma; 1035 int order; 1036 1037 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; 1038 if (!alloc_size) 1039 return; 1040 1041 order = get_order(alloc_size); 1042 tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare), 1043 GFP_KERNEL); 1044 if (!tx_spare) { 1045 /* The driver still work without the tx spare buffer */ 1046 dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n"); 1047 return; 1048 } 1049 1050 page = alloc_pages_node(dev_to_node(ring_to_dev(ring)), 1051 GFP_KERNEL, order); 1052 if (!page) { 1053 dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n"); 1054 devm_kfree(ring_to_dev(ring), tx_spare); 1055 return; 1056 } 1057 1058 dma = dma_map_page(ring_to_dev(ring), page, 0, 1059 PAGE_SIZE << order, DMA_TO_DEVICE); 1060 if (dma_mapping_error(ring_to_dev(ring), dma)) { 1061 dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n"); 1062 put_page(page); 1063 devm_kfree(ring_to_dev(ring), tx_spare); 1064 return; 1065 } 1066 1067 tx_spare->dma = dma; 1068 tx_spare->buf = page_address(page); 1069 tx_spare->len = PAGE_SIZE << order; 1070 ring->tx_spare = tx_spare; 1071 } 1072 1073 /* Use hns3_tx_spare_space() to make sure there is enough 
buffer 1074 * before calling below function to allocate tx buffer. 1075 */ 1076 static void *hns3_tx_spare_alloc(struct hns3_enet_ring *ring, 1077 unsigned int size, dma_addr_t *dma, 1078 u32 *cb_len) 1079 { 1080 struct hns3_tx_spare *tx_spare = ring->tx_spare; 1081 u32 ntu = tx_spare->next_to_use; 1082 1083 size = ALIGN(size, dma_get_cache_alignment()); 1084 *cb_len = size; 1085 1086 /* Tx spare buffer wraps back here because the end of 1087 * freed tx buffer is not enough. 1088 */ 1089 if (ntu + size > tx_spare->len) { 1090 *cb_len += (tx_spare->len - ntu); 1091 ntu = 0; 1092 } 1093 1094 tx_spare->next_to_use = ntu + size; 1095 if (tx_spare->next_to_use == tx_spare->len) 1096 tx_spare->next_to_use = 0; 1097 1098 *dma = tx_spare->dma + ntu; 1099 1100 return tx_spare->buf + ntu; 1101 } 1102 1103 static void hns3_tx_spare_rollback(struct hns3_enet_ring *ring, u32 len) 1104 { 1105 struct hns3_tx_spare *tx_spare = ring->tx_spare; 1106 1107 if (len > tx_spare->next_to_use) { 1108 len -= tx_spare->next_to_use; 1109 tx_spare->next_to_use = tx_spare->len - len; 1110 } else { 1111 tx_spare->next_to_use -= len; 1112 } 1113 } 1114 1115 static void hns3_tx_spare_reclaim_cb(struct hns3_enet_ring *ring, 1116 struct hns3_desc_cb *cb) 1117 { 1118 struct hns3_tx_spare *tx_spare = ring->tx_spare; 1119 u32 ntc = tx_spare->next_to_clean; 1120 u32 len = cb->length; 1121 1122 tx_spare->next_to_clean += len; 1123 1124 if (tx_spare->next_to_clean >= tx_spare->len) { 1125 tx_spare->next_to_clean -= tx_spare->len; 1126 1127 if (tx_spare->next_to_clean) { 1128 ntc = 0; 1129 len = tx_spare->next_to_clean; 1130 } 1131 } 1132 1133 /* This tx spare buffer is only really reclaimed after calling 1134 * hns3_tx_spare_update(), so it is still safe to use the info in 1135 * the tx buffer to do the dma sync or sg unmapping after 1136 * tx_spare->next_to_clean is moved forword. 1137 */ 1138 if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) { 1139 dma_addr_t dma = tx_spare->dma + ntc; 1140 1141 dma_sync_single_for_cpu(ring_to_dev(ring), dma, len, 1142 DMA_TO_DEVICE); 1143 } else { 1144 struct sg_table *sgt = tx_spare->buf + ntc; 1145 1146 dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, 1147 DMA_TO_DEVICE); 1148 } 1149 } 1150 1151 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs, 1152 u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes) 1153 { 1154 u32 l4_offset, hdr_len; 1155 union l3_hdr_info l3; 1156 union l4_hdr_info l4; 1157 u32 l4_paylen; 1158 int ret; 1159 1160 if (!skb_is_gso(skb)) 1161 return 0; 1162 1163 ret = skb_cow_head(skb, 0); 1164 if (unlikely(ret < 0)) 1165 return ret; 1166 1167 l3.hdr = skb_network_header(skb); 1168 l4.hdr = skb_transport_header(skb); 1169 1170 /* Software should clear the IPv4's checksum field when tso is 1171 * needed. 1172 */ 1173 if (l3.v4->version == 4) 1174 l3.v4->check = 0; 1175 1176 /* tunnel packet */ 1177 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 1178 SKB_GSO_GRE_CSUM | 1179 SKB_GSO_UDP_TUNNEL | 1180 SKB_GSO_UDP_TUNNEL_CSUM)) { 1181 /* reset l3&l4 pointers from outer to inner headers */ 1182 l3.hdr = skb_inner_network_header(skb); 1183 l4.hdr = skb_inner_transport_header(skb); 1184 1185 /* Software should clear the IPv4's checksum field when 1186 * tso is needed. 
1187 */ 1188 if (l3.v4->version == 4) 1189 l3.v4->check = 0; 1190 } 1191 1192 /* normal or tunnel packet */ 1193 l4_offset = l4.hdr - skb->data; 1194 1195 /* remove payload length from inner pseudo checksum when tso */ 1196 l4_paylen = skb->len - l4_offset; 1197 1198 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 1199 hdr_len = sizeof(*l4.udp) + l4_offset; 1200 csum_replace_by_diff(&l4.udp->check, 1201 (__force __wsum)htonl(l4_paylen)); 1202 } else { 1203 hdr_len = (l4.tcp->doff << 2) + l4_offset; 1204 csum_replace_by_diff(&l4.tcp->check, 1205 (__force __wsum)htonl(l4_paylen)); 1206 } 1207 1208 *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len; 1209 1210 /* find the txbd field values */ 1211 *paylen_fdop_ol4cs = skb->len - hdr_len; 1212 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); 1213 1214 /* offload outer UDP header checksum */ 1215 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) 1216 hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1); 1217 1218 /* get MSS for TSO */ 1219 *mss = skb_shinfo(skb)->gso_size; 1220 1221 trace_hns3_tso(skb); 1222 1223 return 0; 1224 } 1225 1226 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, 1227 u8 *il4_proto) 1228 { 1229 union l3_hdr_info l3; 1230 unsigned char *l4_hdr; 1231 unsigned char *exthdr; 1232 u8 l4_proto_tmp; 1233 __be16 frag_off; 1234 1235 /* find outer header point */ 1236 l3.hdr = skb_network_header(skb); 1237 l4_hdr = skb_transport_header(skb); 1238 1239 if (skb->protocol == htons(ETH_P_IPV6)) { 1240 exthdr = l3.hdr + sizeof(*l3.v6); 1241 l4_proto_tmp = l3.v6->nexthdr; 1242 if (l4_hdr != exthdr) 1243 ipv6_skip_exthdr(skb, exthdr - skb->data, 1244 &l4_proto_tmp, &frag_off); 1245 } else if (skb->protocol == htons(ETH_P_IP)) { 1246 l4_proto_tmp = l3.v4->protocol; 1247 } else { 1248 return -EINVAL; 1249 } 1250 1251 *ol4_proto = l4_proto_tmp; 1252 1253 /* tunnel packet */ 1254 if (!skb->encapsulation) { 1255 *il4_proto = 0; 1256 return 0; 1257 } 1258 1259 /* find inner header point */ 1260 l3.hdr = skb_inner_network_header(skb); 1261 l4_hdr = skb_inner_transport_header(skb); 1262 1263 if (l3.v6->version == 6) { 1264 exthdr = l3.hdr + sizeof(*l3.v6); 1265 l4_proto_tmp = l3.v6->nexthdr; 1266 if (l4_hdr != exthdr) 1267 ipv6_skip_exthdr(skb, exthdr - skb->data, 1268 &l4_proto_tmp, &frag_off); 1269 } else if (l3.v4->version == 4) { 1270 l4_proto_tmp = l3.v4->protocol; 1271 } 1272 1273 *il4_proto = l4_proto_tmp; 1274 1275 return 0; 1276 } 1277 1278 /* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL 1279 * and it is udp packet, which has a dest port as the IANA assigned. 1280 * the hardware is expected to do the checksum offload, but the 1281 * hardware will not do the checksum offload when udp dest port is 1282 * 4789, 4790 or 6081. 1283 */ 1284 static bool hns3_tunnel_csum_bug(struct sk_buff *skb) 1285 { 1286 struct hns3_nic_priv *priv = netdev_priv(skb->dev); 1287 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); 1288 union l4_hdr_info l4; 1289 1290 /* device version above V3(include V3), the hardware can 1291 * do this checksum offload. 
1292 */ 1293 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) 1294 return false; 1295 1296 l4.hdr = skb_transport_header(skb); 1297 1298 if (!(!skb->encapsulation && 1299 (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || 1300 l4.udp->dest == htons(GENEVE_UDP_PORT) || 1301 l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT)))) 1302 return false; 1303 1304 return true; 1305 } 1306 1307 static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, 1308 u32 *ol_type_vlan_len_msec) 1309 { 1310 u32 l2_len, l3_len, l4_len; 1311 unsigned char *il2_hdr; 1312 union l3_hdr_info l3; 1313 union l4_hdr_info l4; 1314 1315 l3.hdr = skb_network_header(skb); 1316 l4.hdr = skb_transport_header(skb); 1317 1318 /* compute OL2 header size, defined in 2 Bytes */ 1319 l2_len = l3.hdr - skb->data; 1320 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1); 1321 1322 /* compute OL3 header size, defined in 4 Bytes */ 1323 l3_len = l4.hdr - l3.hdr; 1324 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2); 1325 1326 il2_hdr = skb_inner_mac_header(skb); 1327 /* compute OL4 header size, defined in 4 Bytes */ 1328 l4_len = il2_hdr - l4.hdr; 1329 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2); 1330 1331 /* define outer network header type */ 1332 if (skb->protocol == htons(ETH_P_IP)) { 1333 if (skb_is_gso(skb)) 1334 hns3_set_field(*ol_type_vlan_len_msec, 1335 HNS3_TXD_OL3T_S, 1336 HNS3_OL3T_IPV4_CSUM); 1337 else 1338 hns3_set_field(*ol_type_vlan_len_msec, 1339 HNS3_TXD_OL3T_S, 1340 HNS3_OL3T_IPV4_NO_CSUM); 1341 } else if (skb->protocol == htons(ETH_P_IPV6)) { 1342 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, 1343 HNS3_OL3T_IPV6); 1344 } 1345 1346 if (ol4_proto == IPPROTO_UDP) 1347 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, 1348 HNS3_TUN_MAC_IN_UDP); 1349 else if (ol4_proto == IPPROTO_GRE) 1350 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, 1351 HNS3_TUN_NVGRE); 1352 } 1353 1354 static void hns3_set_l3_type(struct sk_buff *skb, union l3_hdr_info l3, 1355 u32 *type_cs_vlan_tso) 1356 { 1357 if (l3.v4->version == 4) { 1358 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, 1359 HNS3_L3T_IPV4); 1360 1361 /* the stack computes the IP header already, the only time we 1362 * need the hardware to recompute it is in the case of TSO. 1363 */ 1364 if (skb_is_gso(skb)) 1365 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); 1366 } else if (l3.v6->version == 6) { 1367 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, 1368 HNS3_L3T_IPV6); 1369 } 1370 } 1371 1372 static int hns3_set_l4_csum_length(struct sk_buff *skb, union l4_hdr_info l4, 1373 u32 l4_proto, u32 *type_cs_vlan_tso) 1374 { 1375 /* compute inner(/normal) L4 header size, defined in 4 Bytes */ 1376 switch (l4_proto) { 1377 case IPPROTO_TCP: 1378 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 1379 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 1380 HNS3_L4T_TCP); 1381 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 1382 l4.tcp->doff); 1383 break; 1384 case IPPROTO_UDP: 1385 if (hns3_tunnel_csum_bug(skb)) { 1386 int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN); 1387 1388 return ret ? 
ret : skb_checksum_help(skb); 1389 } 1390 1391 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 1392 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 1393 HNS3_L4T_UDP); 1394 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 1395 (sizeof(struct udphdr) >> 2)); 1396 break; 1397 case IPPROTO_SCTP: 1398 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); 1399 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, 1400 HNS3_L4T_SCTP); 1401 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, 1402 (sizeof(struct sctphdr) >> 2)); 1403 break; 1404 default: 1405 /* drop the skb tunnel packet if hardware don't support, 1406 * because hardware can't calculate csum when TSO. 1407 */ 1408 if (skb_is_gso(skb)) 1409 return -EDOM; 1410 1411 /* the stack computes the IP header already, 1412 * driver calculate l4 checksum when not TSO. 1413 */ 1414 return skb_checksum_help(skb); 1415 } 1416 1417 return 0; 1418 } 1419 1420 static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, 1421 u8 il4_proto, u32 *type_cs_vlan_tso, 1422 u32 *ol_type_vlan_len_msec) 1423 { 1424 unsigned char *l2_hdr = skb->data; 1425 u32 l4_proto = ol4_proto; 1426 union l4_hdr_info l4; 1427 union l3_hdr_info l3; 1428 u32 l2_len, l3_len; 1429 1430 l4.hdr = skb_transport_header(skb); 1431 l3.hdr = skb_network_header(skb); 1432 1433 /* handle encapsulation skb */ 1434 if (skb->encapsulation) { 1435 /* If this is a not UDP/GRE encapsulation skb */ 1436 if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { 1437 /* drop the skb tunnel packet if hardware don't support, 1438 * because hardware can't calculate csum when TSO. 1439 */ 1440 if (skb_is_gso(skb)) 1441 return -EDOM; 1442 1443 /* the stack computes the IP header already, 1444 * driver calculate l4 checksum when not TSO. 1445 */ 1446 return skb_checksum_help(skb); 1447 } 1448 1449 hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); 1450 1451 /* switch to inner header */ 1452 l2_hdr = skb_inner_mac_header(skb); 1453 l3.hdr = skb_inner_network_header(skb); 1454 l4.hdr = skb_inner_transport_header(skb); 1455 l4_proto = il4_proto; 1456 } 1457 1458 hns3_set_l3_type(skb, l3, type_cs_vlan_tso); 1459 1460 /* compute inner(/normal) L2 header size, defined in 2 Bytes */ 1461 l2_len = l3.hdr - l2_hdr; 1462 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); 1463 1464 /* compute inner(/normal) L3 header size, defined in 4 Bytes */ 1465 l3_len = l4.hdr - l3.hdr; 1466 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); 1467 1468 return hns3_set_l4_csum_length(skb, l4, l4_proto, type_cs_vlan_tso); 1469 } 1470 1471 static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, 1472 struct sk_buff *skb) 1473 { 1474 struct hnae3_handle *handle = tx_ring->tqp->handle; 1475 struct hnae3_ae_dev *ae_dev; 1476 struct vlan_ethhdr *vhdr; 1477 int rc; 1478 1479 if (!(skb->protocol == htons(ETH_P_8021Q) || 1480 skb_vlan_tag_present(skb))) 1481 return 0; 1482 1483 /* For HW limitation on HNAE3_DEVICE_VERSION_V2, if port based insert 1484 * VLAN enabled, only one VLAN header is allowed in skb, otherwise it 1485 * will cause RAS error. 
1486 */ 1487 ae_dev = pci_get_drvdata(handle->pdev); 1488 if (unlikely(skb_vlan_tagged_multi(skb) && 1489 ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && 1490 handle->port_base_vlan_state == 1491 HNAE3_PORT_BASE_VLAN_ENABLE)) 1492 return -EINVAL; 1493 1494 if (skb->protocol == htons(ETH_P_8021Q) && 1495 !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { 1496 /* When HW VLAN acceleration is turned off, and the stack 1497 * sets the protocol to 802.1q, the driver just need to 1498 * set the protocol to the encapsulated ethertype. 1499 */ 1500 skb->protocol = vlan_get_protocol(skb); 1501 return 0; 1502 } 1503 1504 if (skb_vlan_tag_present(skb)) { 1505 /* Based on hw strategy, use out_vtag in two layer tag case, 1506 * and use inner_vtag in one tag case. 1507 */ 1508 if (skb->protocol == htons(ETH_P_8021Q) && 1509 handle->port_base_vlan_state == 1510 HNAE3_PORT_BASE_VLAN_DISABLE) 1511 rc = HNS3_OUTER_VLAN_TAG; 1512 else 1513 rc = HNS3_INNER_VLAN_TAG; 1514 1515 skb->protocol = vlan_get_protocol(skb); 1516 return rc; 1517 } 1518 1519 rc = skb_cow_head(skb, 0); 1520 if (unlikely(rc < 0)) 1521 return rc; 1522 1523 vhdr = (struct vlan_ethhdr *)skb->data; 1524 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) 1525 & VLAN_PRIO_MASK); 1526 1527 skb->protocol = vlan_get_protocol(skb); 1528 return 0; 1529 } 1530 1531 /* check if the hardware is capable of checksum offloading */ 1532 static bool hns3_check_hw_tx_csum(struct sk_buff *skb) 1533 { 1534 struct hns3_nic_priv *priv = netdev_priv(skb->dev); 1535 1536 /* Kindly note, due to backward compatibility of the TX descriptor, 1537 * HW checksum of the non-IP packets and GSO packets is handled at 1538 * different place in the following code 1539 */ 1540 if (skb_csum_is_sctp(skb) || skb_is_gso(skb) || 1541 !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state)) 1542 return false; 1543 1544 return true; 1545 } 1546 1547 struct hns3_desc_param { 1548 u32 paylen_ol4cs; 1549 u32 ol_type_vlan_len_msec; 1550 u32 type_cs_vlan_tso; 1551 u16 mss_hw_csum; 1552 u16 inner_vtag; 1553 u16 out_vtag; 1554 }; 1555 1556 static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa) 1557 { 1558 pa->paylen_ol4cs = skb->len; 1559 pa->ol_type_vlan_len_msec = 0; 1560 pa->type_cs_vlan_tso = 0; 1561 pa->mss_hw_csum = 0; 1562 pa->inner_vtag = 0; 1563 pa->out_vtag = 0; 1564 } 1565 1566 static int hns3_handle_vlan_info(struct hns3_enet_ring *ring, 1567 struct sk_buff *skb, 1568 struct hns3_desc_param *param) 1569 { 1570 int ret; 1571 1572 ret = hns3_handle_vtags(ring, skb); 1573 if (unlikely(ret < 0)) { 1574 hns3_ring_stats_update(ring, tx_vlan_err); 1575 return ret; 1576 } else if (ret == HNS3_INNER_VLAN_TAG) { 1577 param->inner_vtag = skb_vlan_tag_get(skb); 1578 param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & 1579 VLAN_PRIO_MASK; 1580 hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); 1581 } else if (ret == HNS3_OUTER_VLAN_TAG) { 1582 param->out_vtag = skb_vlan_tag_get(skb); 1583 param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & 1584 VLAN_PRIO_MASK; 1585 hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, 1586 1); 1587 } 1588 return 0; 1589 } 1590 1591 static int hns3_handle_csum_partial(struct hns3_enet_ring *ring, 1592 struct sk_buff *skb, 1593 struct hns3_desc_cb *desc_cb, 1594 struct hns3_desc_param *param) 1595 { 1596 u8 ol4_proto, il4_proto; 1597 int ret; 1598 1599 if (hns3_check_hw_tx_csum(skb)) { 1600 /* set checksum start and offset, defined in 2 Bytes */ 1601 
hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S, 1602 skb_checksum_start_offset(skb) >> 1); 1603 hns3_set_field(param->ol_type_vlan_len_msec, 1604 HNS3_TXD_CSUM_OFFSET_S, 1605 skb->csum_offset >> 1); 1606 param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); 1607 return 0; 1608 } 1609 1610 skb_reset_mac_len(skb); 1611 1612 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); 1613 if (unlikely(ret < 0)) { 1614 hns3_ring_stats_update(ring, tx_l4_proto_err); 1615 return ret; 1616 } 1617 1618 ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, 1619 ¶m->type_cs_vlan_tso, 1620 ¶m->ol_type_vlan_len_msec); 1621 if (unlikely(ret < 0)) { 1622 hns3_ring_stats_update(ring, tx_l2l3l4_err); 1623 return ret; 1624 } 1625 1626 ret = hns3_set_tso(skb, ¶m->paylen_ol4cs, ¶m->mss_hw_csum, 1627 ¶m->type_cs_vlan_tso, &desc_cb->send_bytes); 1628 if (unlikely(ret < 0)) { 1629 hns3_ring_stats_update(ring, tx_tso_err); 1630 return ret; 1631 } 1632 return 0; 1633 } 1634 1635 static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, 1636 struct sk_buff *skb, struct hns3_desc *desc, 1637 struct hns3_desc_cb *desc_cb) 1638 { 1639 struct hns3_desc_param param; 1640 int ret; 1641 1642 hns3_init_desc_data(skb, ¶m); 1643 ret = hns3_handle_vlan_info(ring, skb, ¶m); 1644 if (unlikely(ret < 0)) 1645 return ret; 1646 1647 desc_cb->send_bytes = skb->len; 1648 1649 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1650 ret = hns3_handle_csum_partial(ring, skb, desc_cb, ¶m); 1651 if (ret) 1652 return ret; 1653 } 1654 1655 /* Set txbd */ 1656 desc->tx.ol_type_vlan_len_msec = 1657 cpu_to_le32(param.ol_type_vlan_len_msec); 1658 desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso); 1659 desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs); 1660 desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum); 1661 desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag); 1662 desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag); 1663 1664 return 0; 1665 } 1666 1667 static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma, 1668 unsigned int size) 1669 { 1670 #define HNS3_LIKELY_BD_NUM 1 1671 1672 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 1673 unsigned int frag_buf_num; 1674 int k, sizeoflast; 1675 1676 if (likely(size <= HNS3_MAX_BD_SIZE)) { 1677 desc->addr = cpu_to_le64(dma); 1678 desc->tx.send_size = cpu_to_le16(size); 1679 desc->tx.bdtp_fe_sc_vld_ra_ri = 1680 cpu_to_le16(BIT(HNS3_TXD_VLD_B)); 1681 1682 trace_hns3_tx_desc(ring, ring->next_to_use); 1683 ring_ptr_move_fw(ring, next_to_use); 1684 return HNS3_LIKELY_BD_NUM; 1685 } 1686 1687 frag_buf_num = hns3_tx_bd_count(size); 1688 sizeoflast = size % HNS3_MAX_BD_SIZE; 1689 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; 1690 1691 /* When frag size is bigger than hardware limit, split this frag */ 1692 for (k = 0; k < frag_buf_num; k++) { 1693 /* now, fill the descriptor */ 1694 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); 1695 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? 
1696 (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE); 1697 desc->tx.bdtp_fe_sc_vld_ra_ri = 1698 cpu_to_le16(BIT(HNS3_TXD_VLD_B)); 1699 1700 trace_hns3_tx_desc(ring, ring->next_to_use); 1701 /* move ring pointer to next */ 1702 ring_ptr_move_fw(ring, next_to_use); 1703 1704 desc = &ring->desc[ring->next_to_use]; 1705 } 1706 1707 return frag_buf_num; 1708 } 1709 1710 static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv, 1711 unsigned int type) 1712 { 1713 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 1714 struct device *dev = ring_to_dev(ring); 1715 unsigned int size; 1716 dma_addr_t dma; 1717 1718 if (type & (DESC_TYPE_FRAGLIST_SKB | DESC_TYPE_SKB)) { 1719 struct sk_buff *skb = (struct sk_buff *)priv; 1720 1721 size = skb_headlen(skb); 1722 if (!size) 1723 return 0; 1724 1725 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); 1726 } else if (type & DESC_TYPE_BOUNCE_HEAD) { 1727 /* Head data has been filled in hns3_handle_tx_bounce(), 1728 * just return 0 here. 1729 */ 1730 return 0; 1731 } else { 1732 skb_frag_t *frag = (skb_frag_t *)priv; 1733 1734 size = skb_frag_size(frag); 1735 if (!size) 1736 return 0; 1737 1738 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); 1739 } 1740 1741 if (unlikely(dma_mapping_error(dev, dma))) { 1742 hns3_ring_stats_update(ring, sw_err_cnt); 1743 return -ENOMEM; 1744 } 1745 1746 desc_cb->priv = priv; 1747 desc_cb->length = size; 1748 desc_cb->dma = dma; 1749 desc_cb->type = type; 1750 1751 return hns3_fill_desc(ring, dma, size); 1752 } 1753 1754 static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size, 1755 unsigned int bd_num) 1756 { 1757 unsigned int size; 1758 int i; 1759 1760 size = skb_headlen(skb); 1761 while (size > HNS3_MAX_BD_SIZE) { 1762 bd_size[bd_num++] = HNS3_MAX_BD_SIZE; 1763 size -= HNS3_MAX_BD_SIZE; 1764 1765 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1766 return bd_num; 1767 } 1768 1769 if (size) { 1770 bd_size[bd_num++] = size; 1771 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1772 return bd_num; 1773 } 1774 1775 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1776 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1777 size = skb_frag_size(frag); 1778 if (!size) 1779 continue; 1780 1781 while (size > HNS3_MAX_BD_SIZE) { 1782 bd_size[bd_num++] = HNS3_MAX_BD_SIZE; 1783 size -= HNS3_MAX_BD_SIZE; 1784 1785 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1786 return bd_num; 1787 } 1788 1789 bd_size[bd_num++] = size; 1790 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1791 return bd_num; 1792 } 1793 1794 return bd_num; 1795 } 1796 1797 static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size, 1798 u8 max_non_tso_bd_num, unsigned int bd_num, 1799 unsigned int recursion_level) 1800 { 1801 #define HNS3_MAX_RECURSION_LEVEL 24 1802 1803 struct sk_buff *frag_skb; 1804 1805 /* If the total len is within the max bd limit */ 1806 if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level && 1807 !skb_has_frag_list(skb) && 1808 skb_shinfo(skb)->nr_frags < max_non_tso_bd_num)) 1809 return skb_shinfo(skb)->nr_frags + 1U; 1810 1811 if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL)) 1812 return UINT_MAX; 1813 1814 bd_num = hns3_skb_bd_num(skb, bd_size, bd_num); 1815 if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM) 1816 return bd_num; 1817 1818 skb_walk_frags(skb, frag_skb) { 1819 bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num, 1820 bd_num, recursion_level + 1); 1821 if (bd_num > HNS3_MAX_TSO_BD_NUM) 1822 return bd_num; 1823 } 1824 1825 return bd_num; 1826 } 1827 1828 static 
unsigned int hns3_gso_hdr_len(struct sk_buff *skb) 1829 { 1830 if (!skb->encapsulation) 1831 return skb_transport_offset(skb) + tcp_hdrlen(skb); 1832 1833 return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); 1834 } 1835 1836 /* HW need every continuous max_non_tso_bd_num buffer data to be larger 1837 * than MSS, we simplify it by ensuring skb_headlen + the first continuous 1838 * max_non_tso_bd_num - 1 frags to be larger than gso header len + mss, 1839 * and the remaining continuous max_non_tso_bd_num - 1 frags to be larger 1840 * than MSS except the last max_non_tso_bd_num - 1 frags. 1841 */ 1842 static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size, 1843 unsigned int bd_num, u8 max_non_tso_bd_num) 1844 { 1845 unsigned int tot_len = 0; 1846 int i; 1847 1848 for (i = 0; i < max_non_tso_bd_num - 1U; i++) 1849 tot_len += bd_size[i]; 1850 1851 /* ensure the first max_non_tso_bd_num frags is greater than 1852 * mss + header 1853 */ 1854 if (tot_len + bd_size[max_non_tso_bd_num - 1U] < 1855 skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb)) 1856 return true; 1857 1858 /* ensure every continuous max_non_tso_bd_num - 1 buffer is greater 1859 * than mss except the last one. 1860 */ 1861 for (i = 0; i < bd_num - max_non_tso_bd_num; i++) { 1862 tot_len -= bd_size[i]; 1863 tot_len += bd_size[i + max_non_tso_bd_num - 1U]; 1864 1865 if (tot_len < skb_shinfo(skb)->gso_size) 1866 return true; 1867 } 1868 1869 return false; 1870 } 1871 1872 void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size) 1873 { 1874 int i; 1875 1876 for (i = 0; i < MAX_SKB_FRAGS; i++) 1877 size[i] = skb_frag_size(&shinfo->frags[i]); 1878 } 1879 1880 static int hns3_skb_linearize(struct hns3_enet_ring *ring, 1881 struct sk_buff *skb, 1882 unsigned int bd_num) 1883 { 1884 /* 'bd_num == UINT_MAX' means the skb' fraglist has a 1885 * recursion level of over HNS3_MAX_RECURSION_LEVEL. 1886 */ 1887 if (bd_num == UINT_MAX) { 1888 hns3_ring_stats_update(ring, over_max_recursion); 1889 return -ENOMEM; 1890 } 1891 1892 /* The skb->len has exceeded the hw limitation, linearization 1893 * will not help. 
1894 */ 1895 if (skb->len > HNS3_MAX_TSO_SIZE || 1896 (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) { 1897 hns3_ring_stats_update(ring, hw_limitation); 1898 return -ENOMEM; 1899 } 1900 1901 if (__skb_linearize(skb)) { 1902 hns3_ring_stats_update(ring, sw_err_cnt); 1903 return -ENOMEM; 1904 } 1905 1906 return 0; 1907 } 1908 1909 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, 1910 struct net_device *netdev, 1911 struct sk_buff *skb) 1912 { 1913 struct hns3_nic_priv *priv = netdev_priv(netdev); 1914 u8 max_non_tso_bd_num = priv->max_non_tso_bd_num; 1915 unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U]; 1916 unsigned int bd_num; 1917 1918 bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0); 1919 if (unlikely(bd_num > max_non_tso_bd_num)) { 1920 if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && 1921 !hns3_skb_need_linearized(skb, bd_size, bd_num, 1922 max_non_tso_bd_num)) { 1923 trace_hns3_over_max_bd(skb); 1924 goto out; 1925 } 1926 1927 if (hns3_skb_linearize(ring, skb, bd_num)) 1928 return -ENOMEM; 1929 1930 bd_num = hns3_tx_bd_count(skb->len); 1931 1932 hns3_ring_stats_update(ring, tx_copy); 1933 } 1934 1935 out: 1936 if (likely(ring_space(ring) >= bd_num)) 1937 return bd_num; 1938 1939 netif_stop_subqueue(netdev, ring->queue_index); 1940 smp_mb(); /* Memory barrier before checking ring_space */ 1941 1942 /* Start queue in case hns3_clean_tx_ring has just made room 1943 * available and has not seen the queue stopped state performed 1944 * by netif_stop_subqueue above. 1945 */ 1946 if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) && 1947 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 1948 netif_start_subqueue(netdev, ring->queue_index); 1949 return bd_num; 1950 } 1951 1952 hns3_ring_stats_update(ring, tx_busy); 1953 1954 return -EBUSY; 1955 } 1956 1957 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) 1958 { 1959 struct device *dev = ring_to_dev(ring); 1960 unsigned int i; 1961 1962 for (i = 0; i < ring->desc_num; i++) { 1963 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 1964 struct hns3_desc_cb *desc_cb; 1965 1966 memset(desc, 0, sizeof(*desc)); 1967 1968 /* check if this is where we started */ 1969 if (ring->next_to_use == next_to_use_orig) 1970 break; 1971 1972 /* rollback one */ 1973 ring_ptr_move_bw(ring, next_to_use); 1974 1975 desc_cb = &ring->desc_cb[ring->next_to_use]; 1976 1977 if (!desc_cb->dma) 1978 continue; 1979 1980 /* unmap the descriptor dma address */ 1981 if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) 1982 dma_unmap_single(dev, desc_cb->dma, desc_cb->length, 1983 DMA_TO_DEVICE); 1984 else if (desc_cb->type & 1985 (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) 1986 hns3_tx_spare_rollback(ring, desc_cb->length); 1987 else if (desc_cb->length) 1988 dma_unmap_page(dev, desc_cb->dma, desc_cb->length, 1989 DMA_TO_DEVICE); 1990 1991 desc_cb->length = 0; 1992 desc_cb->dma = 0; 1993 desc_cb->type = DESC_TYPE_UNKNOWN; 1994 } 1995 } 1996 1997 static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, 1998 struct sk_buff *skb, unsigned int type) 1999 { 2000 struct sk_buff *frag_skb; 2001 int i, ret, bd_num = 0; 2002 2003 ret = hns3_map_and_fill_desc(ring, skb, type); 2004 if (unlikely(ret < 0)) 2005 return ret; 2006 2007 bd_num += ret; 2008 2009 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2010 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2011 2012 ret = hns3_map_and_fill_desc(ring, frag, DESC_TYPE_PAGE); 2013 if (unlikely(ret < 0)) 2014 return ret; 2015 2016 bd_num 
+= ret; 2017 } 2018 2019 skb_walk_frags(skb, frag_skb) { 2020 ret = hns3_fill_skb_to_desc(ring, frag_skb, 2021 DESC_TYPE_FRAGLIST_SKB); 2022 if (unlikely(ret < 0)) 2023 return ret; 2024 2025 bd_num += ret; 2026 } 2027 2028 return bd_num; 2029 } 2030 2031 static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num) 2032 { 2033 #define HNS3_BYTES_PER_64BIT 8 2034 2035 struct hns3_desc desc[HNS3_MAX_PUSH_BD_NUM] = {}; 2036 int offset = 0; 2037 2038 /* make sure everything is visible to device before 2039 * executing tx push or updating doorbell 2040 */ 2041 dma_wmb(); 2042 2043 do { 2044 int idx = (ring->next_to_use - num + ring->desc_num) % 2045 ring->desc_num; 2046 2047 u64_stats_update_begin(&ring->syncp); 2048 ring->stats.tx_push++; 2049 u64_stats_update_end(&ring->syncp); 2050 memcpy(&desc[offset], &ring->desc[idx], 2051 sizeof(struct hns3_desc)); 2052 offset++; 2053 } while (--num); 2054 2055 __iowrite64_copy(ring->tqp->mem_base, desc, 2056 (sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) / 2057 HNS3_BYTES_PER_64BIT); 2058 2059 io_stop_wc(); 2060 } 2061 2062 static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring) 2063 { 2064 #define HNS3_MEM_DOORBELL_OFFSET 64 2065 2066 __le64 bd_num = cpu_to_le64((u64)ring->pending_buf); 2067 2068 /* make sure everything is visible to device before 2069 * executing tx push or updating doorbell 2070 */ 2071 dma_wmb(); 2072 2073 __iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET, 2074 &bd_num, 1); 2075 u64_stats_update_begin(&ring->syncp); 2076 ring->stats.tx_mem_doorbell += ring->pending_buf; 2077 u64_stats_update_end(&ring->syncp); 2078 2079 io_stop_wc(); 2080 } 2081 2082 static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, 2083 bool doorbell) 2084 { 2085 struct net_device *netdev = ring_to_netdev(ring); 2086 struct hns3_nic_priv *priv = netdev_priv(netdev); 2087 2088 /* when tx push is enabled, a packet whose number of BDs does not exceed 2089 * HNS3_MAX_PUSH_BD_NUM can be pushed directly.
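 * In that case hns3_tx_push_bd() copies the BDs straight into the
 * device's mem_base window instead of ringing the tail doorbell
 * register; this path is only taken when no BDs are already pending
 * on the ring and the doorbell is being requested for this packet.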
2090 */ 2091 if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num && 2092 !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) { 2093 hns3_tx_push_bd(ring, num); 2094 WRITE_ONCE(ring->last_to_use, ring->next_to_use); 2095 return; 2096 } 2097 2098 ring->pending_buf += num; 2099 2100 if (!doorbell) { 2101 hns3_ring_stats_update(ring, tx_more); 2102 return; 2103 } 2104 2105 if (ring->tqp->mem_base) 2106 hns3_tx_mem_doorbell(ring); 2107 else 2108 writel(ring->pending_buf, 2109 ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); 2110 2111 ring->pending_buf = 0; 2112 WRITE_ONCE(ring->last_to_use, ring->next_to_use); 2113 } 2114 2115 static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb, 2116 struct hns3_desc *desc) 2117 { 2118 struct hnae3_handle *h = hns3_get_handle(netdev); 2119 2120 if (!(h->ae_algo->ops->set_tx_hwts_info && 2121 h->ae_algo->ops->set_tx_hwts_info(h, skb))) 2122 return; 2123 2124 desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B)); 2125 } 2126 2127 static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring, 2128 struct sk_buff *skb) 2129 { 2130 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 2131 unsigned int type = DESC_TYPE_BOUNCE_HEAD; 2132 unsigned int size = skb_headlen(skb); 2133 dma_addr_t dma; 2134 int bd_num = 0; 2135 u32 cb_len; 2136 void *buf; 2137 int ret; 2138 2139 if (skb->len <= ring->tx_copybreak) { 2140 size = skb->len; 2141 type = DESC_TYPE_BOUNCE_ALL; 2142 } 2143 2144 /* hns3_can_use_tx_bounce() is called to ensure the below 2145 * function can always return the tx buffer. 2146 */ 2147 buf = hns3_tx_spare_alloc(ring, size, &dma, &cb_len); 2148 2149 ret = skb_copy_bits(skb, 0, buf, size); 2150 if (unlikely(ret < 0)) { 2151 hns3_tx_spare_rollback(ring, cb_len); 2152 hns3_ring_stats_update(ring, copy_bits_err); 2153 return ret; 2154 } 2155 2156 desc_cb->priv = skb; 2157 desc_cb->length = cb_len; 2158 desc_cb->dma = dma; 2159 desc_cb->type = type; 2160 2161 bd_num += hns3_fill_desc(ring, dma, size); 2162 2163 if (type == DESC_TYPE_BOUNCE_HEAD) { 2164 ret = hns3_fill_skb_to_desc(ring, skb, 2165 DESC_TYPE_BOUNCE_HEAD); 2166 if (unlikely(ret < 0)) 2167 return ret; 2168 2169 bd_num += ret; 2170 } 2171 2172 dma_sync_single_for_device(ring_to_dev(ring), dma, size, 2173 DMA_TO_DEVICE); 2174 2175 hns3_ring_stats_update(ring, tx_bounce); 2176 2177 return bd_num; 2178 } 2179 2180 static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, 2181 struct sk_buff *skb) 2182 { 2183 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 2184 u32 nfrag = skb_shinfo(skb)->nr_frags + 1; 2185 struct sg_table *sgt; 2186 int i, bd_num = 0; 2187 dma_addr_t dma; 2188 u32 cb_len; 2189 int nents; 2190 2191 if (skb_has_frag_list(skb)) 2192 nfrag = HNS3_MAX_TSO_BD_NUM; 2193 2194 /* hns3_can_use_tx_sgl() is called to ensure the below 2195 * function can always return the tx buffer. 
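 * (The caller has already verified via hns3_can_use_tx_sgl() that the
 * spare buffer has enough room, so the return value of
 * hns3_tx_spare_alloc() is intentionally not checked for NULL here.)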
2196 */ 2197 sgt = hns3_tx_spare_alloc(ring, HNS3_SGL_SIZE(nfrag), 2198 &dma, &cb_len); 2199 2200 /* scatterlist follows by the sg table */ 2201 sgt->sgl = (struct scatterlist *)(sgt + 1); 2202 sg_init_table(sgt->sgl, nfrag); 2203 nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len); 2204 if (unlikely(nents < 0)) { 2205 hns3_tx_spare_rollback(ring, cb_len); 2206 hns3_ring_stats_update(ring, skb2sgl_err); 2207 return -ENOMEM; 2208 } 2209 2210 sgt->orig_nents = nents; 2211 sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, 2212 DMA_TO_DEVICE); 2213 if (unlikely(!sgt->nents)) { 2214 hns3_tx_spare_rollback(ring, cb_len); 2215 hns3_ring_stats_update(ring, map_sg_err); 2216 return -ENOMEM; 2217 } 2218 2219 desc_cb->priv = skb; 2220 desc_cb->length = cb_len; 2221 desc_cb->dma = dma; 2222 desc_cb->type = DESC_TYPE_SGL_SKB; 2223 2224 for (i = 0; i < sgt->nents; i++) 2225 bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i), 2226 sg_dma_len(sgt->sgl + i)); 2227 hns3_ring_stats_update(ring, tx_sgl); 2228 2229 return bd_num; 2230 } 2231 2232 static int hns3_handle_desc_filling(struct hns3_enet_ring *ring, 2233 struct sk_buff *skb) 2234 { 2235 u32 space; 2236 2237 if (!ring->tx_spare) 2238 goto out; 2239 2240 space = hns3_tx_spare_space(ring); 2241 2242 if (hns3_can_use_tx_sgl(ring, skb, space)) 2243 return hns3_handle_tx_sgl(ring, skb); 2244 2245 if (hns3_can_use_tx_bounce(ring, skb, space)) 2246 return hns3_handle_tx_bounce(ring, skb); 2247 2248 out: 2249 return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); 2250 } 2251 2252 static int hns3_handle_skb_desc(struct hns3_enet_ring *ring, 2253 struct sk_buff *skb, 2254 struct hns3_desc_cb *desc_cb, 2255 int next_to_use_head) 2256 { 2257 int ret; 2258 2259 ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use], 2260 desc_cb); 2261 if (unlikely(ret < 0)) 2262 goto fill_err; 2263 2264 /* 'ret < 0' means filling error, 'ret == 0' means skb->len is 2265 * zero, which is unlikely, and 'ret > 0' means how many tx desc 2266 * need to be notified to the hw. 2267 */ 2268 ret = hns3_handle_desc_filling(ring, skb); 2269 if (likely(ret > 0)) 2270 return ret; 2271 2272 fill_err: 2273 hns3_clear_desc(ring, next_to_use_head); 2274 return ret; 2275 } 2276 2277 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) 2278 { 2279 struct hns3_nic_priv *priv = netdev_priv(netdev); 2280 struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; 2281 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 2282 struct netdev_queue *dev_queue; 2283 int pre_ntu, ret; 2284 bool doorbell; 2285 2286 /* Hardware can only handle short frames above 32 bytes */ 2287 if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) { 2288 hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); 2289 2290 hns3_ring_stats_update(ring, sw_err_cnt); 2291 2292 return NETDEV_TX_OK; 2293 } 2294 2295 /* Prefetch the data used later */ 2296 prefetch(skb->data); 2297 2298 ret = hns3_nic_maybe_stop_tx(ring, netdev, skb); 2299 if (unlikely(ret <= 0)) { 2300 if (ret == -EBUSY) { 2301 hns3_tx_doorbell(ring, 0, true); 2302 return NETDEV_TX_BUSY; 2303 } 2304 2305 hns3_rl_err(netdev, "xmit error: %d!\n", ret); 2306 goto out_err_tx_ok; 2307 } 2308 2309 ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use); 2310 if (unlikely(ret <= 0)) 2311 goto out_err_tx_ok; 2312 2313 pre_ntu = ring->next_to_use ? 
(ring->next_to_use - 1) : 2314 (ring->desc_num - 1); 2315 2316 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) 2317 hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]); 2318 2319 ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= 2320 cpu_to_le16(BIT(HNS3_TXD_FE_B)); 2321 trace_hns3_tx_desc(ring, pre_ntu); 2322 2323 skb_tx_timestamp(skb); 2324 2325 /* Complete translate all packets */ 2326 dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); 2327 doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes, 2328 netdev_xmit_more()); 2329 hns3_tx_doorbell(ring, ret, doorbell); 2330 2331 return NETDEV_TX_OK; 2332 2333 out_err_tx_ok: 2334 dev_kfree_skb_any(skb); 2335 hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); 2336 return NETDEV_TX_OK; 2337 } 2338 2339 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) 2340 { 2341 char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN]; 2342 char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN]; 2343 struct hnae3_handle *h = hns3_get_handle(netdev); 2344 struct sockaddr *mac_addr = p; 2345 int ret; 2346 2347 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) 2348 return -EADDRNOTAVAIL; 2349 2350 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { 2351 hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); 2352 netdev_info(netdev, "already using mac address %s\n", 2353 format_mac_addr_sa); 2354 return 0; 2355 } 2356 2357 /* For VF device, if there is a perm_addr, then the user will not 2358 * be allowed to change the address. 2359 */ 2360 if (!hns3_is_phys_func(h->pdev) && 2361 !is_zero_ether_addr(netdev->perm_addr)) { 2362 hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr); 2363 hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); 2364 netdev_err(netdev, "has permanent MAC %s, user MAC %s not allow\n", 2365 format_mac_addr_perm, format_mac_addr_sa); 2366 return -EPERM; 2367 } 2368 2369 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); 2370 if (ret) { 2371 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); 2372 return ret; 2373 } 2374 2375 eth_hw_addr_set(netdev, mac_addr->sa_data); 2376 2377 return 0; 2378 } 2379 2380 static int hns3_nic_do_ioctl(struct net_device *netdev, 2381 struct ifreq *ifr, int cmd) 2382 { 2383 struct hnae3_handle *h = hns3_get_handle(netdev); 2384 2385 if (!netif_running(netdev)) 2386 return -EINVAL; 2387 2388 if (!h->ae_algo->ops->do_ioctl) 2389 return -EOPNOTSUPP; 2390 2391 return h->ae_algo->ops->do_ioctl(h, ifr, cmd); 2392 } 2393 2394 static int hns3_nic_set_features(struct net_device *netdev, 2395 netdev_features_t features) 2396 { 2397 netdev_features_t changed = netdev->features ^ features; 2398 struct hns3_nic_priv *priv = netdev_priv(netdev); 2399 struct hnae3_handle *h = priv->ae_handle; 2400 bool enable; 2401 int ret; 2402 2403 if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { 2404 enable = !!(features & NETIF_F_GRO_HW); 2405 ret = h->ae_algo->ops->set_gro_en(h, enable); 2406 if (ret) 2407 return ret; 2408 } 2409 2410 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && 2411 h->ae_algo->ops->enable_hw_strip_rxvtag) { 2412 enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); 2413 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); 2414 if (ret) 2415 return ret; 2416 } 2417 2418 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { 2419 enable = !!(features & NETIF_F_NTUPLE); 2420 h->ae_algo->ops->enable_fd(h, enable); 2421 } 2422 2423 if ((netdev->features & NETIF_F_HW_TC) > (features & 
NETIF_F_HW_TC) && 2424 h->ae_algo->ops->cls_flower_active(h)) { 2425 netdev_err(netdev, 2426 "there are offloaded TC filters active, cannot disable HW TC offload"); 2427 return -EINVAL; 2428 } 2429 2430 if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && 2431 h->ae_algo->ops->enable_vlan_filter) { 2432 enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); 2433 ret = h->ae_algo->ops->enable_vlan_filter(h, enable); 2434 if (ret) 2435 return ret; 2436 } 2437 2438 netdev->features = features; 2439 return 0; 2440 } 2441 2442 static netdev_features_t hns3_features_check(struct sk_buff *skb, 2443 struct net_device *dev, 2444 netdev_features_t features) 2445 { 2446 #define HNS3_MAX_HDR_LEN 480U 2447 #define HNS3_MAX_L4_HDR_LEN 60U 2448 2449 size_t len; 2450 2451 if (skb->ip_summed != CHECKSUM_PARTIAL) 2452 return features; 2453 2454 if (skb->encapsulation) 2455 len = skb_inner_transport_header(skb) - skb->data; 2456 else 2457 len = skb_transport_header(skb) - skb->data; 2458 2459 /* Assume L4 is 60 byte as TCP is the only protocol with a 2460 * a flexible value, and it's max len is 60 bytes. 2461 */ 2462 len += HNS3_MAX_L4_HDR_LEN; 2463 2464 /* Hardware only supports checksum on the skb with a max header 2465 * len of 480 bytes. 2466 */ 2467 if (len > HNS3_MAX_HDR_LEN) 2468 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 2469 2470 return features; 2471 } 2472 2473 static void hns3_fetch_stats(struct rtnl_link_stats64 *stats, 2474 struct hns3_enet_ring *ring, bool is_tx) 2475 { 2476 unsigned int start; 2477 2478 do { 2479 start = u64_stats_fetch_begin_irq(&ring->syncp); 2480 if (is_tx) { 2481 stats->tx_bytes += ring->stats.tx_bytes; 2482 stats->tx_packets += ring->stats.tx_pkts; 2483 stats->tx_dropped += ring->stats.sw_err_cnt; 2484 stats->tx_dropped += ring->stats.tx_vlan_err; 2485 stats->tx_dropped += ring->stats.tx_l4_proto_err; 2486 stats->tx_dropped += ring->stats.tx_l2l3l4_err; 2487 stats->tx_dropped += ring->stats.tx_tso_err; 2488 stats->tx_dropped += ring->stats.over_max_recursion; 2489 stats->tx_dropped += ring->stats.hw_limitation; 2490 stats->tx_dropped += ring->stats.copy_bits_err; 2491 stats->tx_dropped += ring->stats.skb2sgl_err; 2492 stats->tx_dropped += ring->stats.map_sg_err; 2493 stats->tx_errors += ring->stats.sw_err_cnt; 2494 stats->tx_errors += ring->stats.tx_vlan_err; 2495 stats->tx_errors += ring->stats.tx_l4_proto_err; 2496 stats->tx_errors += ring->stats.tx_l2l3l4_err; 2497 stats->tx_errors += ring->stats.tx_tso_err; 2498 stats->tx_errors += ring->stats.over_max_recursion; 2499 stats->tx_errors += ring->stats.hw_limitation; 2500 stats->tx_errors += ring->stats.copy_bits_err; 2501 stats->tx_errors += ring->stats.skb2sgl_err; 2502 stats->tx_errors += ring->stats.map_sg_err; 2503 } else { 2504 stats->rx_bytes += ring->stats.rx_bytes; 2505 stats->rx_packets += ring->stats.rx_pkts; 2506 stats->rx_dropped += ring->stats.l2_err; 2507 stats->rx_errors += ring->stats.l2_err; 2508 stats->rx_errors += ring->stats.l3l4_csum_err; 2509 stats->rx_crc_errors += ring->stats.l2_err; 2510 stats->multicast += ring->stats.rx_multicast; 2511 stats->rx_length_errors += ring->stats.err_pkt_len; 2512 } 2513 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 2514 } 2515 2516 static void hns3_nic_get_stats64(struct net_device *netdev, 2517 struct rtnl_link_stats64 *stats) 2518 { 2519 struct hns3_nic_priv *priv = netdev_priv(netdev); 2520 int queue_num = priv->ae_handle->kinfo.num_tqps; 2521 struct hnae3_handle *handle = priv->ae_handle; 2522 struct rtnl_link_stats64 ring_total_stats; 2523 
struct hns3_enet_ring *ring; 2524 unsigned int idx; 2525 2526 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 2527 return; 2528 2529 handle->ae_algo->ops->update_stats(handle, &netdev->stats); 2530 2531 memset(&ring_total_stats, 0, sizeof(ring_total_stats)); 2532 for (idx = 0; idx < queue_num; idx++) { 2533 /* fetch the tx stats */ 2534 ring = &priv->ring[idx]; 2535 hns3_fetch_stats(&ring_total_stats, ring, true); 2536 2537 /* fetch the rx stats */ 2538 ring = &priv->ring[idx + queue_num]; 2539 hns3_fetch_stats(&ring_total_stats, ring, false); 2540 } 2541 2542 stats->tx_bytes = ring_total_stats.tx_bytes; 2543 stats->tx_packets = ring_total_stats.tx_packets; 2544 stats->rx_bytes = ring_total_stats.rx_bytes; 2545 stats->rx_packets = ring_total_stats.rx_packets; 2546 2547 stats->rx_errors = ring_total_stats.rx_errors; 2548 stats->multicast = ring_total_stats.multicast; 2549 stats->rx_length_errors = ring_total_stats.rx_length_errors; 2550 stats->rx_crc_errors = ring_total_stats.rx_crc_errors; 2551 stats->rx_missed_errors = netdev->stats.rx_missed_errors; 2552 2553 stats->tx_errors = ring_total_stats.tx_errors; 2554 stats->rx_dropped = ring_total_stats.rx_dropped; 2555 stats->tx_dropped = ring_total_stats.tx_dropped; 2556 stats->collisions = netdev->stats.collisions; 2557 stats->rx_over_errors = netdev->stats.rx_over_errors; 2558 stats->rx_frame_errors = netdev->stats.rx_frame_errors; 2559 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; 2560 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; 2561 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; 2562 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; 2563 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; 2564 stats->tx_window_errors = netdev->stats.tx_window_errors; 2565 stats->rx_compressed = netdev->stats.rx_compressed; 2566 stats->tx_compressed = netdev->stats.tx_compressed; 2567 } 2568 2569 static int hns3_setup_tc(struct net_device *netdev, void *type_data) 2570 { 2571 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 2572 struct hnae3_knic_private_info *kinfo; 2573 u8 tc = mqprio_qopt->qopt.num_tc; 2574 u16 mode = mqprio_qopt->mode; 2575 u8 hw = mqprio_qopt->qopt.hw; 2576 struct hnae3_handle *h; 2577 2578 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && 2579 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) 2580 return -EOPNOTSUPP; 2581 2582 if (tc > HNAE3_MAX_TC) 2583 return -EINVAL; 2584 2585 if (!netdev) 2586 return -EINVAL; 2587 2588 h = hns3_get_handle(netdev); 2589 kinfo = &h->kinfo; 2590 2591 netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); 2592 2593 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? 
2594 kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP; 2595 } 2596 2597 static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv, 2598 struct flow_cls_offload *flow) 2599 { 2600 int tc = tc_classid_to_hwtc(priv->netdev, flow->classid); 2601 struct hnae3_handle *h = hns3_get_handle(priv->netdev); 2602 2603 switch (flow->command) { 2604 case FLOW_CLS_REPLACE: 2605 if (h->ae_algo->ops->add_cls_flower) 2606 return h->ae_algo->ops->add_cls_flower(h, flow, tc); 2607 break; 2608 case FLOW_CLS_DESTROY: 2609 if (h->ae_algo->ops->del_cls_flower) 2610 return h->ae_algo->ops->del_cls_flower(h, flow); 2611 break; 2612 default: 2613 break; 2614 } 2615 2616 return -EOPNOTSUPP; 2617 } 2618 2619 static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 2620 void *cb_priv) 2621 { 2622 struct hns3_nic_priv *priv = cb_priv; 2623 2624 if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) 2625 return -EOPNOTSUPP; 2626 2627 switch (type) { 2628 case TC_SETUP_CLSFLOWER: 2629 return hns3_setup_tc_cls_flower(priv, type_data); 2630 default: 2631 return -EOPNOTSUPP; 2632 } 2633 } 2634 2635 static LIST_HEAD(hns3_block_cb_list); 2636 2637 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, 2638 void *type_data) 2639 { 2640 struct hns3_nic_priv *priv = netdev_priv(dev); 2641 int ret; 2642 2643 switch (type) { 2644 case TC_SETUP_QDISC_MQPRIO: 2645 ret = hns3_setup_tc(dev, type_data); 2646 break; 2647 case TC_SETUP_BLOCK: 2648 ret = flow_block_cb_setup_simple(type_data, 2649 &hns3_block_cb_list, 2650 hns3_setup_tc_block_cb, 2651 priv, priv, true); 2652 break; 2653 default: 2654 return -EOPNOTSUPP; 2655 } 2656 2657 return ret; 2658 } 2659 2660 static int hns3_vlan_rx_add_vid(struct net_device *netdev, 2661 __be16 proto, u16 vid) 2662 { 2663 struct hnae3_handle *h = hns3_get_handle(netdev); 2664 int ret = -EIO; 2665 2666 if (h->ae_algo->ops->set_vlan_filter) 2667 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); 2668 2669 return ret; 2670 } 2671 2672 static int hns3_vlan_rx_kill_vid(struct net_device *netdev, 2673 __be16 proto, u16 vid) 2674 { 2675 struct hnae3_handle *h = hns3_get_handle(netdev); 2676 int ret = -EIO; 2677 2678 if (h->ae_algo->ops->set_vlan_filter) 2679 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); 2680 2681 return ret; 2682 } 2683 2684 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 2685 u8 qos, __be16 vlan_proto) 2686 { 2687 struct hnae3_handle *h = hns3_get_handle(netdev); 2688 int ret = -EIO; 2689 2690 netif_dbg(h, drv, netdev, 2691 "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n", 2692 vf, vlan, qos, ntohs(vlan_proto)); 2693 2694 if (h->ae_algo->ops->set_vf_vlan_filter) 2695 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, 2696 qos, vlan_proto); 2697 2698 return ret; 2699 } 2700 2701 static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) 2702 { 2703 struct hnae3_handle *handle = hns3_get_handle(netdev); 2704 2705 if (hns3_nic_resetting(netdev)) 2706 return -EBUSY; 2707 2708 if (!handle->ae_algo->ops->set_vf_spoofchk) 2709 return -EOPNOTSUPP; 2710 2711 return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable); 2712 } 2713 2714 static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable) 2715 { 2716 struct hnae3_handle *handle = hns3_get_handle(netdev); 2717 2718 if (!handle->ae_algo->ops->set_vf_trust) 2719 return -EOPNOTSUPP; 2720 2721 return handle->ae_algo->ops->set_vf_trust(handle, vf, enable); 2722 } 2723 2724 static int 
hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) 2725 { 2726 struct hnae3_handle *h = hns3_get_handle(netdev); 2727 int ret; 2728 2729 if (hns3_nic_resetting(netdev)) 2730 return -EBUSY; 2731 2732 if (!h->ae_algo->ops->set_mtu) 2733 return -EOPNOTSUPP; 2734 2735 netif_dbg(h, drv, netdev, 2736 "change mtu from %u to %d\n", netdev->mtu, new_mtu); 2737 2738 ret = h->ae_algo->ops->set_mtu(h, new_mtu); 2739 if (ret) 2740 netdev_err(netdev, "failed to change MTU in hardware %d\n", 2741 ret); 2742 else 2743 netdev->mtu = new_mtu; 2744 2745 return ret; 2746 } 2747 2748 static int hns3_get_timeout_queue(struct net_device *ndev) 2749 { 2750 int i; 2751 2752 /* Find the stopped queue the same way the stack does */ 2753 for (i = 0; i < ndev->num_tx_queues; i++) { 2754 struct netdev_queue *q; 2755 unsigned long trans_start; 2756 2757 q = netdev_get_tx_queue(ndev, i); 2758 trans_start = READ_ONCE(q->trans_start); 2759 if (netif_xmit_stopped(q) && 2760 time_after(jiffies, 2761 (trans_start + ndev->watchdog_timeo))) { 2762 #ifdef CONFIG_BQL 2763 struct dql *dql = &q->dql; 2764 2765 netdev_info(ndev, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n", 2766 dql->last_obj_cnt, dql->num_queued, 2767 dql->adj_limit, dql->num_completed); 2768 #endif 2769 netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n", 2770 q->state, 2771 jiffies_to_msecs(jiffies - trans_start)); 2772 break; 2773 } 2774 } 2775 2776 return i; 2777 } 2778 2779 static void hns3_dump_queue_stats(struct net_device *ndev, 2780 struct hns3_enet_ring *tx_ring, 2781 int timeout_queue) 2782 { 2783 struct napi_struct *napi = &tx_ring->tqp_vector->napi; 2784 struct hns3_nic_priv *priv = netdev_priv(ndev); 2785 2786 netdev_info(ndev, 2787 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", 2788 priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, 2789 tx_ring->next_to_clean, napi->state); 2790 2791 netdev_info(ndev, 2792 "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n", 2793 tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, 2794 tx_ring->stats.sw_err_cnt, tx_ring->pending_buf); 2795 2796 netdev_info(ndev, 2797 "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n", 2798 tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, 2799 tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); 2800 2801 netdev_info(ndev, "tx_push: %llu, tx_mem_doorbell: %llu\n", 2802 tx_ring->stats.tx_push, tx_ring->stats.tx_mem_doorbell); 2803 } 2804 2805 static void hns3_dump_queue_reg(struct net_device *ndev, 2806 struct hns3_enet_ring *tx_ring) 2807 { 2808 netdev_info(ndev, 2809 "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", 2810 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_NUM_REG), 2811 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_HEAD_REG), 2812 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TAIL_REG), 2813 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_ERR_REG), 2814 readl(tx_ring->tqp_vector->mask_addr)); 2815 netdev_info(ndev, 2816 "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", 2817 hns3_tqp_read_reg(tx_ring, HNS3_RING_EN_REG), 2818 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TC_REG), 2819 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_FBDNUM_REG), 2820 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_OFFSET_REG), 2821 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_EBDNUM_REG), 2822 hns3_tqp_read_reg(tx_ring, 2823 HNS3_RING_TX_RING_EBD_OFFSET_REG)); 2824 } 2825 2826 static bool 
hns3_get_tx_timeo_queue_info(struct net_device *ndev) 2827 { 2828 struct hns3_nic_priv *priv = netdev_priv(ndev); 2829 struct hnae3_handle *h = hns3_get_handle(ndev); 2830 struct hns3_enet_ring *tx_ring; 2831 int timeout_queue; 2832 2833 timeout_queue = hns3_get_timeout_queue(ndev); 2834 if (timeout_queue >= ndev->num_tx_queues) { 2835 netdev_info(ndev, 2836 "no netdev TX timeout queue found, timeout count: %llu\n", 2837 priv->tx_timeout_count); 2838 return false; 2839 } 2840 2841 priv->tx_timeout_count++; 2842 2843 tx_ring = &priv->ring[timeout_queue]; 2844 hns3_dump_queue_stats(ndev, tx_ring, timeout_queue); 2845 2846 /* When mac received many pause frames continuous, it's unable to send 2847 * packets, which may cause tx timeout 2848 */ 2849 if (h->ae_algo->ops->get_mac_stats) { 2850 struct hns3_mac_stats mac_stats; 2851 2852 h->ae_algo->ops->get_mac_stats(h, &mac_stats); 2853 netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n", 2854 mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt); 2855 } 2856 2857 hns3_dump_queue_reg(ndev, tx_ring); 2858 2859 return true; 2860 } 2861 2862 static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue) 2863 { 2864 struct hns3_nic_priv *priv = netdev_priv(ndev); 2865 struct hnae3_handle *h = priv->ae_handle; 2866 2867 if (!hns3_get_tx_timeo_queue_info(ndev)) 2868 return; 2869 2870 /* request the reset, and let the hclge to determine 2871 * which reset level should be done 2872 */ 2873 if (h->ae_algo->ops->reset_event) 2874 h->ae_algo->ops->reset_event(h->pdev, h); 2875 } 2876 2877 #ifdef CONFIG_RFS_ACCEL 2878 static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 2879 u16 rxq_index, u32 flow_id) 2880 { 2881 struct hnae3_handle *h = hns3_get_handle(dev); 2882 struct flow_keys fkeys; 2883 2884 if (!h->ae_algo->ops->add_arfs_entry) 2885 return -EOPNOTSUPP; 2886 2887 if (skb->encapsulation) 2888 return -EPROTONOSUPPORT; 2889 2890 if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0)) 2891 return -EPROTONOSUPPORT; 2892 2893 if ((fkeys.basic.n_proto != htons(ETH_P_IP) && 2894 fkeys.basic.n_proto != htons(ETH_P_IPV6)) || 2895 (fkeys.basic.ip_proto != IPPROTO_TCP && 2896 fkeys.basic.ip_proto != IPPROTO_UDP)) 2897 return -EPROTONOSUPPORT; 2898 2899 return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); 2900 } 2901 #endif 2902 2903 static int hns3_nic_get_vf_config(struct net_device *ndev, int vf, 2904 struct ifla_vf_info *ivf) 2905 { 2906 struct hnae3_handle *h = hns3_get_handle(ndev); 2907 2908 if (!h->ae_algo->ops->get_vf_config) 2909 return -EOPNOTSUPP; 2910 2911 return h->ae_algo->ops->get_vf_config(h, vf, ivf); 2912 } 2913 2914 static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf, 2915 int link_state) 2916 { 2917 struct hnae3_handle *h = hns3_get_handle(ndev); 2918 2919 if (!h->ae_algo->ops->set_vf_link_state) 2920 return -EOPNOTSUPP; 2921 2922 return h->ae_algo->ops->set_vf_link_state(h, vf, link_state); 2923 } 2924 2925 static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf, 2926 int min_tx_rate, int max_tx_rate) 2927 { 2928 struct hnae3_handle *h = hns3_get_handle(ndev); 2929 2930 if (!h->ae_algo->ops->set_vf_rate) 2931 return -EOPNOTSUPP; 2932 2933 return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate, 2934 false); 2935 } 2936 2937 static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) 2938 { 2939 struct hnae3_handle *h = hns3_get_handle(netdev); 2940 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 2941 2942 if 
(!h->ae_algo->ops->set_vf_mac) 2943 return -EOPNOTSUPP; 2944 2945 if (is_multicast_ether_addr(mac)) { 2946 hnae3_format_mac_addr(format_mac_addr, mac); 2947 netdev_err(netdev, 2948 "Invalid MAC:%s specified. Could not set MAC\n", 2949 format_mac_addr); 2950 return -EINVAL; 2951 } 2952 2953 return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); 2954 } 2955 2956 static const struct net_device_ops hns3_nic_netdev_ops = { 2957 .ndo_open = hns3_nic_net_open, 2958 .ndo_stop = hns3_nic_net_stop, 2959 .ndo_start_xmit = hns3_nic_net_xmit, 2960 .ndo_tx_timeout = hns3_nic_net_timeout, 2961 .ndo_set_mac_address = hns3_nic_net_set_mac_address, 2962 .ndo_eth_ioctl = hns3_nic_do_ioctl, 2963 .ndo_change_mtu = hns3_nic_change_mtu, 2964 .ndo_set_features = hns3_nic_set_features, 2965 .ndo_features_check = hns3_features_check, 2966 .ndo_get_stats64 = hns3_nic_get_stats64, 2967 .ndo_setup_tc = hns3_nic_setup_tc, 2968 .ndo_set_rx_mode = hns3_nic_set_rx_mode, 2969 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, 2970 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, 2971 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, 2972 .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk, 2973 .ndo_set_vf_trust = hns3_set_vf_trust, 2974 #ifdef CONFIG_RFS_ACCEL 2975 .ndo_rx_flow_steer = hns3_rx_flow_steer, 2976 #endif 2977 .ndo_get_vf_config = hns3_nic_get_vf_config, 2978 .ndo_set_vf_link_state = hns3_nic_set_vf_link_state, 2979 .ndo_set_vf_rate = hns3_nic_set_vf_rate, 2980 .ndo_set_vf_mac = hns3_nic_set_vf_mac, 2981 }; 2982 2983 bool hns3_is_phys_func(struct pci_dev *pdev) 2984 { 2985 u32 dev_id = pdev->device; 2986 2987 switch (dev_id) { 2988 case HNAE3_DEV_ID_GE: 2989 case HNAE3_DEV_ID_25GE: 2990 case HNAE3_DEV_ID_25GE_RDMA: 2991 case HNAE3_DEV_ID_25GE_RDMA_MACSEC: 2992 case HNAE3_DEV_ID_50GE_RDMA: 2993 case HNAE3_DEV_ID_50GE_RDMA_MACSEC: 2994 case HNAE3_DEV_ID_100G_RDMA_MACSEC: 2995 case HNAE3_DEV_ID_200G_RDMA: 2996 return true; 2997 case HNAE3_DEV_ID_VF: 2998 case HNAE3_DEV_ID_RDMA_DCB_PFC_VF: 2999 return false; 3000 default: 3001 dev_warn(&pdev->dev, "un-recognized pci device-id %u", 3002 dev_id); 3003 } 3004 3005 return false; 3006 } 3007 3008 static void hns3_disable_sriov(struct pci_dev *pdev) 3009 { 3010 /* If our VFs are assigned we cannot shut down SR-IOV 3011 * without causing issues, so just leave the hardware 3012 * available but disabled 3013 */ 3014 if (pci_vfs_assigned(pdev)) { 3015 dev_warn(&pdev->dev, 3016 "disabling driver while VFs are assigned\n"); 3017 return; 3018 } 3019 3020 pci_disable_sriov(pdev); 3021 } 3022 3023 /* hns3_probe - Device initialization routine 3024 * @pdev: PCI device information struct 3025 * @ent: entry in hns3_pci_tbl 3026 * 3027 * hns3_probe initializes a PF identified by a pci_dev structure. 3028 * The OS initialization, configuring of the PF private structure, 3029 * and a hardware reset occur. 
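 * Most of the heavy lifting is deferred: this function only allocates
 * the hnae3_ae_dev, stores it as driver data and registers it via
 * hnae3_register_ae_dev(), where the device-specific setup is expected
 * to be carried out by the bound ae backend.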
3030 * 3031 * Returns 0 on success, negative on failure 3032 */ 3033 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3034 { 3035 struct hnae3_ae_dev *ae_dev; 3036 int ret; 3037 3038 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL); 3039 if (!ae_dev) 3040 return -ENOMEM; 3041 3042 ae_dev->pdev = pdev; 3043 ae_dev->flag = ent->driver_data; 3044 pci_set_drvdata(pdev, ae_dev); 3045 3046 ret = hnae3_register_ae_dev(ae_dev); 3047 if (ret) 3048 pci_set_drvdata(pdev, NULL); 3049 3050 return ret; 3051 } 3052 3053 /* hns3_remove - Device removal routine 3054 * @pdev: PCI device information struct 3055 */ 3056 static void hns3_remove(struct pci_dev *pdev) 3057 { 3058 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 3059 3060 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) 3061 hns3_disable_sriov(pdev); 3062 3063 hnae3_unregister_ae_dev(ae_dev); 3064 pci_set_drvdata(pdev, NULL); 3065 } 3066 3067 /** 3068 * hns3_pci_sriov_configure 3069 * @pdev: pointer to a pci_dev structure 3070 * @num_vfs: number of VFs to allocate 3071 * 3072 * Enable or change the number of VFs. Called when the user updates the number 3073 * of VFs in sysfs. 3074 **/ 3075 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) 3076 { 3077 int ret; 3078 3079 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) { 3080 dev_warn(&pdev->dev, "Can not config SRIOV\n"); 3081 return -EINVAL; 3082 } 3083 3084 if (num_vfs) { 3085 ret = pci_enable_sriov(pdev, num_vfs); 3086 if (ret) 3087 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); 3088 else 3089 return num_vfs; 3090 } else if (!pci_vfs_assigned(pdev)) { 3091 pci_disable_sriov(pdev); 3092 } else { 3093 dev_warn(&pdev->dev, 3094 "Unable to free VFs because some are assigned to VMs.\n"); 3095 } 3096 3097 return 0; 3098 } 3099 3100 static void hns3_shutdown(struct pci_dev *pdev) 3101 { 3102 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 3103 3104 hnae3_unregister_ae_dev(ae_dev); 3105 pci_set_drvdata(pdev, NULL); 3106 3107 if (system_state == SYSTEM_POWER_OFF) 3108 pci_set_power_state(pdev, PCI_D3hot); 3109 } 3110 3111 static int __maybe_unused hns3_suspend(struct device *dev) 3112 { 3113 struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev); 3114 3115 if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { 3116 dev_info(dev, "Begin to suspend.\n"); 3117 if (ae_dev->ops && ae_dev->ops->reset_prepare) 3118 ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET); 3119 } 3120 3121 return 0; 3122 } 3123 3124 static int __maybe_unused hns3_resume(struct device *dev) 3125 { 3126 struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev); 3127 3128 if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { 3129 dev_info(dev, "Begin to resume.\n"); 3130 if (ae_dev->ops && ae_dev->ops->reset_done) 3131 ae_dev->ops->reset_done(ae_dev); 3132 } 3133 3134 return 0; 3135 } 3136 3137 static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, 3138 pci_channel_state_t state) 3139 { 3140 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 3141 pci_ers_result_t ret; 3142 3143 dev_info(&pdev->dev, "PCI error detected, state(=%u)!!\n", state); 3144 3145 if (state == pci_channel_io_perm_failure) 3146 return PCI_ERS_RESULT_DISCONNECT; 3147 3148 if (!ae_dev || !ae_dev->ops) { 3149 dev_err(&pdev->dev, 3150 "Can't recover - error happened before device initialized\n"); 3151 return PCI_ERS_RESULT_NONE; 3152 } 3153 3154 if (ae_dev->ops->handle_hw_ras_error) 3155 ret = ae_dev->ops->handle_hw_ras_error(ae_dev); 3156 else 3157 return 
PCI_ERS_RESULT_NONE; 3158 3159 return ret; 3160 } 3161 3162 static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) 3163 { 3164 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 3165 const struct hnae3_ae_ops *ops; 3166 enum hnae3_reset_type reset_type; 3167 struct device *dev = &pdev->dev; 3168 3169 if (!ae_dev || !ae_dev->ops) 3170 return PCI_ERS_RESULT_NONE; 3171 3172 ops = ae_dev->ops; 3173 /* request the reset */ 3174 if (ops->reset_event && ops->get_reset_level && 3175 ops->set_default_reset_request) { 3176 if (ae_dev->hw_err_reset_req) { 3177 reset_type = ops->get_reset_level(ae_dev, 3178 &ae_dev->hw_err_reset_req); 3179 ops->set_default_reset_request(ae_dev, reset_type); 3180 dev_info(dev, "requesting reset due to PCI error\n"); 3181 ops->reset_event(pdev, NULL); 3182 } 3183 3184 return PCI_ERS_RESULT_RECOVERED; 3185 } 3186 3187 return PCI_ERS_RESULT_DISCONNECT; 3188 } 3189 3190 static void hns3_reset_prepare(struct pci_dev *pdev) 3191 { 3192 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 3193 3194 dev_info(&pdev->dev, "FLR prepare\n"); 3195 if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare) 3196 ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET); 3197 } 3198 3199 static void hns3_reset_done(struct pci_dev *pdev) 3200 { 3201 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 3202 3203 dev_info(&pdev->dev, "FLR done\n"); 3204 if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done) 3205 ae_dev->ops->reset_done(ae_dev); 3206 } 3207 3208 static const struct pci_error_handlers hns3_err_handler = { 3209 .error_detected = hns3_error_detected, 3210 .slot_reset = hns3_slot_reset, 3211 .reset_prepare = hns3_reset_prepare, 3212 .reset_done = hns3_reset_done, 3213 }; 3214 3215 static SIMPLE_DEV_PM_OPS(hns3_pm_ops, hns3_suspend, hns3_resume); 3216 3217 static struct pci_driver hns3_driver = { 3218 .name = hns3_driver_name, 3219 .id_table = hns3_pci_tbl, 3220 .probe = hns3_probe, 3221 .remove = hns3_remove, 3222 .shutdown = hns3_shutdown, 3223 .driver.pm = &hns3_pm_ops, 3224 .sriov_configure = hns3_pci_sriov_configure, 3225 .err_handler = &hns3_err_handler, 3226 }; 3227 3228 /* set default feature to hns3 */ 3229 static void hns3_set_default_feature(struct net_device *netdev) 3230 { 3231 struct hnae3_handle *h = hns3_get_handle(netdev); 3232 struct pci_dev *pdev = h->pdev; 3233 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 3234 3235 netdev->priv_flags |= IFF_UNICAST_FLT; 3236 3237 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 3238 3239 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | 3240 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 3241 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 3242 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 3243 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 3244 NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; 3245 3246 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 3247 netdev->features |= NETIF_F_GRO_HW; 3248 3249 if (!(h->flags & HNAE3_SUPPORT_VF)) 3250 netdev->features |= NETIF_F_NTUPLE; 3251 } 3252 3253 if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) 3254 netdev->features |= NETIF_F_GSO_UDP_L4; 3255 3256 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) 3257 netdev->features |= NETIF_F_HW_CSUM; 3258 else 3259 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3260 3261 if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) 3262 netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 3263 3264 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) 3265 netdev->features |= 
NETIF_F_HW_TC; 3266 3267 netdev->hw_features |= netdev->features; 3268 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) 3269 netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 3270 3271 netdev->vlan_features |= netdev->features & 3272 ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX | 3273 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_GRO_HW | NETIF_F_NTUPLE | 3274 NETIF_F_HW_TC); 3275 3276 netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID; 3277 } 3278 3279 static int hns3_alloc_buffer(struct hns3_enet_ring *ring, 3280 struct hns3_desc_cb *cb) 3281 { 3282 unsigned int order = hns3_page_order(ring); 3283 struct page *p; 3284 3285 if (ring->page_pool) { 3286 p = page_pool_dev_alloc_frag(ring->page_pool, 3287 &cb->page_offset, 3288 hns3_buf_size(ring)); 3289 if (unlikely(!p)) 3290 return -ENOMEM; 3291 3292 cb->priv = p; 3293 cb->buf = page_address(p); 3294 cb->dma = page_pool_get_dma_addr(p); 3295 cb->type = DESC_TYPE_PP_FRAG; 3296 cb->reuse_flag = 0; 3297 return 0; 3298 } 3299 3300 p = dev_alloc_pages(order); 3301 if (!p) 3302 return -ENOMEM; 3303 3304 cb->priv = p; 3305 cb->page_offset = 0; 3306 cb->reuse_flag = 0; 3307 cb->buf = page_address(p); 3308 cb->length = hns3_page_size(ring); 3309 cb->type = DESC_TYPE_PAGE; 3310 page_ref_add(p, USHRT_MAX - 1); 3311 cb->pagecnt_bias = USHRT_MAX; 3312 3313 return 0; 3314 } 3315 3316 static void hns3_free_buffer(struct hns3_enet_ring *ring, 3317 struct hns3_desc_cb *cb, int budget) 3318 { 3319 if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD | 3320 DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB)) 3321 napi_consume_skb(cb->priv, budget); 3322 else if (!HNAE3_IS_TX_RING(ring)) { 3323 if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias) 3324 __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); 3325 else if (cb->type & DESC_TYPE_PP_FRAG) 3326 page_pool_put_full_page(ring->page_pool, cb->priv, 3327 false); 3328 } 3329 memset(cb, 0, sizeof(*cb)); 3330 } 3331 3332 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) 3333 { 3334 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, 3335 cb->length, ring_to_dma_dir(ring)); 3336 3337 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) 3338 return -EIO; 3339 3340 return 0; 3341 } 3342 3343 static void hns3_unmap_buffer(struct hns3_enet_ring *ring, 3344 struct hns3_desc_cb *cb) 3345 { 3346 if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) 3347 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, 3348 ring_to_dma_dir(ring)); 3349 else if ((cb->type & DESC_TYPE_PAGE) && cb->length) 3350 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, 3351 ring_to_dma_dir(ring)); 3352 else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD | 3353 DESC_TYPE_SGL_SKB)) 3354 hns3_tx_spare_reclaim_cb(ring, cb); 3355 } 3356 3357 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) 3358 { 3359 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 3360 ring->desc[i].addr = 0; 3361 ring->desc_cb[i].refill = 0; 3362 } 3363 3364 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i, 3365 int budget) 3366 { 3367 struct hns3_desc_cb *cb = &ring->desc_cb[i]; 3368 3369 if (!ring->desc_cb[i].dma) 3370 return; 3371 3372 hns3_buffer_detach(ring, i); 3373 hns3_free_buffer(ring, cb, budget); 3374 } 3375 3376 static void hns3_free_buffers(struct hns3_enet_ring *ring) 3377 { 3378 int i; 3379 3380 for (i = 0; i < ring->desc_num; i++) 3381 hns3_free_buffer_detach(ring, i, 0); 3382 } 3383 3384 /* free desc along with its attached buffer */ 
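/* hns3_free_buffers() below unmaps and releases the buffers attached to
 * each descriptor first; the DMA-coherent descriptor array allocated in
 * hns3_alloc_desc() is then returned with dma_free_coherent().
 */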
3385 static void hns3_free_desc(struct hns3_enet_ring *ring) 3386 { 3387 int size = ring->desc_num * sizeof(ring->desc[0]); 3388 3389 hns3_free_buffers(ring); 3390 3391 if (ring->desc) { 3392 dma_free_coherent(ring_to_dev(ring), size, 3393 ring->desc, ring->desc_dma_addr); 3394 ring->desc = NULL; 3395 } 3396 } 3397 3398 static int hns3_alloc_desc(struct hns3_enet_ring *ring) 3399 { 3400 int size = ring->desc_num * sizeof(ring->desc[0]); 3401 3402 ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, 3403 &ring->desc_dma_addr, GFP_KERNEL); 3404 if (!ring->desc) 3405 return -ENOMEM; 3406 3407 return 0; 3408 } 3409 3410 static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring, 3411 struct hns3_desc_cb *cb) 3412 { 3413 int ret; 3414 3415 ret = hns3_alloc_buffer(ring, cb); 3416 if (ret || ring->page_pool) 3417 goto out; 3418 3419 ret = hns3_map_buffer(ring, cb); 3420 if (ret) 3421 goto out_with_buf; 3422 3423 return 0; 3424 3425 out_with_buf: 3426 hns3_free_buffer(ring, cb, 0); 3427 out: 3428 return ret; 3429 } 3430 3431 static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i) 3432 { 3433 int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]); 3434 3435 if (ret) 3436 return ret; 3437 3438 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + 3439 ring->desc_cb[i].page_offset); 3440 ring->desc_cb[i].refill = 1; 3441 3442 return 0; 3443 } 3444 3445 /* Allocate memory for raw pkg, and map with dma */ 3446 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) 3447 { 3448 int i, j, ret; 3449 3450 for (i = 0; i < ring->desc_num; i++) { 3451 ret = hns3_alloc_and_attach_buffer(ring, i); 3452 if (ret) 3453 goto out_buffer_fail; 3454 } 3455 3456 return 0; 3457 3458 out_buffer_fail: 3459 for (j = i - 1; j >= 0; j--) 3460 hns3_free_buffer_detach(ring, j, 0); 3461 return ret; 3462 } 3463 3464 /* detach a in-used buffer and replace with a reserved one */ 3465 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, 3466 struct hns3_desc_cb *res_cb) 3467 { 3468 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 3469 ring->desc_cb[i] = *res_cb; 3470 ring->desc_cb[i].refill = 1; 3471 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + 3472 ring->desc_cb[i].page_offset); 3473 ring->desc[i].rx.bd_base_info = 0; 3474 } 3475 3476 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) 3477 { 3478 ring->desc_cb[i].reuse_flag = 0; 3479 ring->desc_cb[i].refill = 1; 3480 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + 3481 ring->desc_cb[i].page_offset); 3482 ring->desc[i].rx.bd_base_info = 0; 3483 3484 dma_sync_single_for_device(ring_to_dev(ring), 3485 ring->desc_cb[i].dma + ring->desc_cb[i].page_offset, 3486 hns3_buf_size(ring), 3487 DMA_FROM_DEVICE); 3488 } 3489 3490 static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, 3491 int *bytes, int *pkts, int budget) 3492 { 3493 /* pair with ring->last_to_use update in hns3_tx_doorbell(), 3494 * smp_store_release() is not used in hns3_tx_doorbell() because 3495 * the doorbell operation already have the needed barrier operation. 
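 * (Roughly: the barriers in the doorbell path order the descriptor
 * writes before the WRITE_ONCE() of last_to_use, and the
 * smp_load_acquire() below guarantees that once the new last_to_use
 * value is observed, those descriptor writes are visible here as well.)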
3496 */ 3497 int ltu = smp_load_acquire(&ring->last_to_use); 3498 int ntc = ring->next_to_clean; 3499 struct hns3_desc_cb *desc_cb; 3500 bool reclaimed = false; 3501 struct hns3_desc *desc; 3502 3503 while (ltu != ntc) { 3504 desc = &ring->desc[ntc]; 3505 3506 if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) & 3507 BIT(HNS3_TXD_VLD_B)) 3508 break; 3509 3510 desc_cb = &ring->desc_cb[ntc]; 3511 3512 if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_ALL | 3513 DESC_TYPE_BOUNCE_HEAD | 3514 DESC_TYPE_SGL_SKB)) { 3515 (*pkts)++; 3516 (*bytes) += desc_cb->send_bytes; 3517 } 3518 3519 /* desc_cb will be cleaned, after hnae3_free_buffer_detach */ 3520 hns3_free_buffer_detach(ring, ntc, budget); 3521 3522 if (++ntc == ring->desc_num) 3523 ntc = 0; 3524 3525 /* Issue prefetch for next Tx descriptor */ 3526 prefetch(&ring->desc_cb[ntc]); 3527 reclaimed = true; 3528 } 3529 3530 if (unlikely(!reclaimed)) 3531 return false; 3532 3533 /* This smp_store_release() pairs with smp_load_acquire() in 3534 * ring_space called by hns3_nic_net_xmit. 3535 */ 3536 smp_store_release(&ring->next_to_clean, ntc); 3537 3538 hns3_tx_spare_update(ring); 3539 3540 return true; 3541 } 3542 3543 void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) 3544 { 3545 struct net_device *netdev = ring_to_netdev(ring); 3546 struct hns3_nic_priv *priv = netdev_priv(netdev); 3547 struct netdev_queue *dev_queue; 3548 int bytes, pkts; 3549 3550 bytes = 0; 3551 pkts = 0; 3552 3553 if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget))) 3554 return; 3555 3556 ring->tqp_vector->tx_group.total_bytes += bytes; 3557 ring->tqp_vector->tx_group.total_packets += pkts; 3558 3559 u64_stats_update_begin(&ring->syncp); 3560 ring->stats.tx_bytes += bytes; 3561 ring->stats.tx_pkts += pkts; 3562 u64_stats_update_end(&ring->syncp); 3563 3564 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); 3565 netdev_tx_completed_queue(dev_queue, pkts, bytes); 3566 3567 if (unlikely(netif_carrier_ok(netdev) && 3568 ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) { 3569 /* Make sure that anybody stopping the queue after this 3570 * sees the new next_to_clean. 3571 */ 3572 smp_mb(); 3573 if (netif_tx_queue_stopped(dev_queue) && 3574 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 3575 netif_tx_wake_queue(dev_queue); 3576 ring->stats.restart_queue++; 3577 } 3578 } 3579 } 3580 3581 static int hns3_desc_unused(struct hns3_enet_ring *ring) 3582 { 3583 int ntc = ring->next_to_clean; 3584 int ntu = ring->next_to_use; 3585 3586 if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill)) 3587 return ring->desc_num; 3588 3589 return ((ntc >= ntu) ? 
0 : ring->desc_num) + ntc - ntu; 3590 } 3591 3592 /* Return true if there is any allocation failure */ 3593 static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, 3594 int cleand_count) 3595 { 3596 struct hns3_desc_cb *desc_cb; 3597 struct hns3_desc_cb res_cbs; 3598 int i, ret; 3599 3600 for (i = 0; i < cleand_count; i++) { 3601 desc_cb = &ring->desc_cb[ring->next_to_use]; 3602 if (desc_cb->reuse_flag) { 3603 hns3_ring_stats_update(ring, reuse_pg_cnt); 3604 3605 hns3_reuse_buffer(ring, ring->next_to_use); 3606 } else { 3607 ret = hns3_alloc_and_map_buffer(ring, &res_cbs); 3608 if (ret) { 3609 hns3_ring_stats_update(ring, sw_err_cnt); 3610 3611 hns3_rl_err(ring_to_netdev(ring), 3612 "alloc rx buffer failed: %d\n", 3613 ret); 3614 3615 writel(i, ring->tqp->io_base + 3616 HNS3_RING_RX_RING_HEAD_REG); 3617 return true; 3618 } 3619 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); 3620 3621 hns3_ring_stats_update(ring, non_reuse_pg); 3622 } 3623 3624 ring_ptr_move_fw(ring, next_to_use); 3625 } 3626 3627 writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); 3628 return false; 3629 } 3630 3631 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) 3632 { 3633 return page_count(cb->priv) == cb->pagecnt_bias; 3634 } 3635 3636 static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i, 3637 struct hns3_enet_ring *ring, 3638 int pull_len, 3639 struct hns3_desc_cb *desc_cb) 3640 { 3641 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; 3642 u32 frag_offset = desc_cb->page_offset + pull_len; 3643 int size = le16_to_cpu(desc->rx.size); 3644 u32 frag_size = size - pull_len; 3645 void *frag = napi_alloc_frag(frag_size); 3646 3647 if (unlikely(!frag)) { 3648 hns3_ring_stats_update(ring, frag_alloc_err); 3649 3650 hns3_rl_err(ring_to_netdev(ring), 3651 "failed to allocate rx frag\n"); 3652 return -ENOMEM; 3653 } 3654 3655 desc_cb->reuse_flag = 1; 3656 memcpy(frag, desc_cb->buf + frag_offset, frag_size); 3657 skb_add_rx_frag(skb, i, virt_to_page(frag), 3658 offset_in_page(frag), frag_size, frag_size); 3659 3660 hns3_ring_stats_update(ring, frag_alloc); 3661 return 0; 3662 } 3663 3664 static void hns3_nic_reuse_page(struct sk_buff *skb, int i, 3665 struct hns3_enet_ring *ring, int pull_len, 3666 struct hns3_desc_cb *desc_cb) 3667 { 3668 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; 3669 u32 frag_offset = desc_cb->page_offset + pull_len; 3670 int size = le16_to_cpu(desc->rx.size); 3671 u32 truesize = hns3_buf_size(ring); 3672 u32 frag_size = size - pull_len; 3673 int ret = 0; 3674 bool reused; 3675 3676 if (ring->page_pool) { 3677 skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, 3678 frag_size, truesize); 3679 return; 3680 } 3681 3682 /* Avoid re-using remote or pfmem page */ 3683 if (unlikely(!dev_page_is_reusable(desc_cb->priv))) 3684 goto out; 3685 3686 reused = hns3_can_reuse_page(desc_cb); 3687 3688 /* Rx page can be reused when: 3689 * 1. Rx page is only owned by the driver when page_offset 3690 * is zero, which means 0 @ truesize will be used by 3691 * stack after skb_add_rx_frag() is called, and the rest 3692 * of rx page can be reused by driver. 3693 * Or 3694 * 2. Rx page is only owned by the driver when page_offset 3695 * is non-zero, which means page_offset @ truesize will 3696 * be used by stack after skb_add_rx_frag() is called, 3697 * and 0 @ truesize can be reused by driver. 
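 * For example (illustrative sizes, assuming a 4K page split into two
 * 2K buffers): a buffer at page_offset 0 whose page is no longer held
 * by the stack moves to offset 2K, and one at offset 2K moves back to
 * offset 0, so the two halves of the page are ping-ponged between
 * driver and stack.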
3698 */ 3699 if ((!desc_cb->page_offset && reused) || 3700 ((desc_cb->page_offset + truesize + truesize) <= 3701 hns3_page_size(ring) && desc_cb->page_offset)) { 3702 desc_cb->page_offset += truesize; 3703 desc_cb->reuse_flag = 1; 3704 } else if (desc_cb->page_offset && reused) { 3705 desc_cb->page_offset = 0; 3706 desc_cb->reuse_flag = 1; 3707 } else if (frag_size <= ring->rx_copybreak) { 3708 ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb); 3709 if (ret) 3710 goto out; 3711 } 3712 3713 out: 3714 desc_cb->pagecnt_bias--; 3715 3716 if (unlikely(!desc_cb->pagecnt_bias)) { 3717 page_ref_add(desc_cb->priv, USHRT_MAX); 3718 desc_cb->pagecnt_bias = USHRT_MAX; 3719 } 3720 3721 skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, 3722 frag_size, truesize); 3723 3724 if (unlikely(!desc_cb->reuse_flag)) 3725 __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); 3726 } 3727 3728 static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) 3729 { 3730 __be16 type = skb->protocol; 3731 struct tcphdr *th; 3732 int depth = 0; 3733 3734 while (eth_type_vlan(type)) { 3735 struct vlan_hdr *vh; 3736 3737 if ((depth + VLAN_HLEN) > skb_headlen(skb)) 3738 return -EFAULT; 3739 3740 vh = (struct vlan_hdr *)(skb->data + depth); 3741 type = vh->h_vlan_encapsulated_proto; 3742 depth += VLAN_HLEN; 3743 } 3744 3745 skb_set_network_header(skb, depth); 3746 3747 if (type == htons(ETH_P_IP)) { 3748 const struct iphdr *iph = ip_hdr(skb); 3749 3750 depth += sizeof(struct iphdr); 3751 skb_set_transport_header(skb, depth); 3752 th = tcp_hdr(skb); 3753 th->check = ~tcp_v4_check(skb->len - depth, iph->saddr, 3754 iph->daddr, 0); 3755 } else if (type == htons(ETH_P_IPV6)) { 3756 const struct ipv6hdr *iph = ipv6_hdr(skb); 3757 3758 depth += sizeof(struct ipv6hdr); 3759 skb_set_transport_header(skb, depth); 3760 th = tcp_hdr(skb); 3761 th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, 3762 &iph->daddr, 0); 3763 } else { 3764 hns3_rl_err(skb->dev, 3765 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", 3766 be16_to_cpu(type), depth); 3767 return -EFAULT; 3768 } 3769 3770 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; 3771 if (th->cwr) 3772 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 3773 3774 if (l234info & BIT(HNS3_RXD_GRO_FIXID_B)) 3775 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; 3776 3777 skb->csum_start = (unsigned char *)th - skb->head; 3778 skb->csum_offset = offsetof(struct tcphdr, check); 3779 skb->ip_summed = CHECKSUM_PARTIAL; 3780 3781 trace_hns3_gro(skb); 3782 3783 return 0; 3784 } 3785 3786 static bool hns3_checksum_complete(struct hns3_enet_ring *ring, 3787 struct sk_buff *skb, u32 ptype, u16 csum) 3788 { 3789 if (ptype == HNS3_INVALID_PTYPE || 3790 hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE) 3791 return false; 3792 3793 hns3_ring_stats_update(ring, csum_complete); 3794 skb->ip_summed = CHECKSUM_COMPLETE; 3795 skb->csum = csum_unfold((__force __sum16)csum); 3796 3797 return true; 3798 } 3799 3800 static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info, 3801 u32 ol_info, u32 ptype) 3802 { 3803 int l3_type, l4_type; 3804 int ol4_type; 3805 3806 if (ptype != HNS3_INVALID_PTYPE) { 3807 skb->csum_level = hns3_rx_ptype_tbl[ptype].csum_level; 3808 skb->ip_summed = hns3_rx_ptype_tbl[ptype].ip_summed; 3809 3810 return; 3811 } 3812 3813 ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M, 3814 HNS3_RXD_OL4ID_S); 3815 switch (ol4_type) { 3816 case HNS3_OL4_TYPE_MAC_IN_UDP: 3817 case HNS3_OL4_TYPE_NVGRE: 3818 skb->csum_level = 1; 3819 
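/* tunneled packet: the hardware has validated the inner checksum as
 * well, so report one extra csum level and fall through to the common
 * inner L3/L4 type check below
 */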
fallthrough; 3820 case HNS3_OL4_TYPE_NO_TUN: 3821 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, 3822 HNS3_RXD_L3ID_S); 3823 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, 3824 HNS3_RXD_L4ID_S); 3825 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ 3826 if ((l3_type == HNS3_L3_TYPE_IPV4 || 3827 l3_type == HNS3_L3_TYPE_IPV6) && 3828 (l4_type == HNS3_L4_TYPE_UDP || 3829 l4_type == HNS3_L4_TYPE_TCP || 3830 l4_type == HNS3_L4_TYPE_SCTP)) 3831 skb->ip_summed = CHECKSUM_UNNECESSARY; 3832 break; 3833 default: 3834 break; 3835 } 3836 } 3837 3838 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, 3839 u32 l234info, u32 bd_base_info, u32 ol_info, 3840 u16 csum) 3841 { 3842 struct net_device *netdev = ring_to_netdev(ring); 3843 struct hns3_nic_priv *priv = netdev_priv(netdev); 3844 u32 ptype = HNS3_INVALID_PTYPE; 3845 3846 skb->ip_summed = CHECKSUM_NONE; 3847 3848 skb_checksum_none_assert(skb); 3849 3850 if (!(netdev->features & NETIF_F_RXCSUM)) 3851 return; 3852 3853 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) 3854 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M, 3855 HNS3_RXD_PTYPE_S); 3856 3857 if (hns3_checksum_complete(ring, skb, ptype, csum)) 3858 return; 3859 3860 /* check if hardware has done checksum */ 3861 if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) 3862 return; 3863 3864 if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | 3865 BIT(HNS3_RXD_OL3E_B) | 3866 BIT(HNS3_RXD_OL4E_B)))) { 3867 hns3_ring_stats_update(ring, l3l4_csum_err); 3868 3869 return; 3870 } 3871 3872 hns3_rx_handle_csum(skb, l234info, ol_info, ptype); 3873 } 3874 3875 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) 3876 { 3877 if (skb_has_frag_list(skb)) 3878 napi_gro_flush(&ring->tqp_vector->napi, false); 3879 3880 napi_gro_receive(&ring->tqp_vector->napi, skb); 3881 } 3882 3883 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, 3884 struct hns3_desc *desc, u32 l234info, 3885 u16 *vlan_tag) 3886 { 3887 struct hnae3_handle *handle = ring->tqp->handle; 3888 struct pci_dev *pdev = ring->tqp->handle->pdev; 3889 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 3890 3891 if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) { 3892 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 3893 if (!(*vlan_tag & VLAN_VID_MASK)) 3894 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 3895 3896 return (*vlan_tag != 0); 3897 } 3898 3899 #define HNS3_STRP_OUTER_VLAN 0x1 3900 #define HNS3_STRP_INNER_VLAN 0x2 3901 #define HNS3_STRP_BOTH 0x3 3902 3903 /* Hardware always insert VLAN tag into RX descriptor when 3904 * remove the tag from packet, driver needs to determine 3905 * reporting which tag to stack. 
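 * The strip status in l234info says which tag(s) were stripped, and
 * the port based VLAN state decides whether the outer or the inner
 * tag is the one to report.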
3906 */ 3907 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, 3908 HNS3_RXD_STRP_TAGP_S)) { 3909 case HNS3_STRP_OUTER_VLAN: 3910 if (handle->port_base_vlan_state != 3911 HNAE3_PORT_BASE_VLAN_DISABLE) 3912 return false; 3913 3914 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 3915 return true; 3916 case HNS3_STRP_INNER_VLAN: 3917 if (handle->port_base_vlan_state != 3918 HNAE3_PORT_BASE_VLAN_DISABLE) 3919 return false; 3920 3921 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 3922 return true; 3923 case HNS3_STRP_BOTH: 3924 if (handle->port_base_vlan_state == 3925 HNAE3_PORT_BASE_VLAN_DISABLE) 3926 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); 3927 else 3928 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); 3929 3930 return true; 3931 default: 3932 return false; 3933 } 3934 } 3935 3936 static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring) 3937 { 3938 ring->desc[ring->next_to_clean].rx.bd_base_info &= 3939 cpu_to_le32(~BIT(HNS3_RXD_VLD_B)); 3940 ring->desc_cb[ring->next_to_clean].refill = 0; 3941 ring->next_to_clean += 1; 3942 3943 if (unlikely(ring->next_to_clean == ring->desc_num)) 3944 ring->next_to_clean = 0; 3945 } 3946 3947 static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, 3948 unsigned char *va) 3949 { 3950 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; 3951 struct net_device *netdev = ring_to_netdev(ring); 3952 struct sk_buff *skb; 3953 3954 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); 3955 skb = ring->skb; 3956 if (unlikely(!skb)) { 3957 hns3_rl_err(netdev, "alloc rx skb fail\n"); 3958 hns3_ring_stats_update(ring, sw_err_cnt); 3959 3960 return -ENOMEM; 3961 } 3962 3963 trace_hns3_rx_desc(ring); 3964 prefetchw(skb->data); 3965 3966 ring->pending_buf = 1; 3967 ring->frag_num = 0; 3968 ring->tail_skb = NULL; 3969 if (length <= HNS3_RX_HEAD_SIZE) { 3970 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); 3971 3972 /* We can reuse buffer as-is, just make sure it is reusable */ 3973 if (dev_page_is_reusable(desc_cb->priv)) 3974 desc_cb->reuse_flag = 1; 3975 else if (desc_cb->type & DESC_TYPE_PP_FRAG) 3976 page_pool_put_full_page(ring->page_pool, desc_cb->priv, 3977 false); 3978 else /* This page cannot be reused so discard it */ 3979 __page_frag_cache_drain(desc_cb->priv, 3980 desc_cb->pagecnt_bias); 3981 3982 hns3_rx_ring_move_fw(ring); 3983 return 0; 3984 } 3985 3986 if (ring->page_pool) 3987 skb_mark_for_recycle(skb); 3988 3989 hns3_ring_stats_update(ring, seg_pkt_cnt); 3990 3991 ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); 3992 __skb_put(skb, ring->pull_len); 3993 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, 3994 desc_cb); 3995 hns3_rx_ring_move_fw(ring); 3996 3997 return 0; 3998 } 3999 4000 static int hns3_add_frag(struct hns3_enet_ring *ring) 4001 { 4002 struct sk_buff *skb = ring->skb; 4003 struct sk_buff *head_skb = skb; 4004 struct sk_buff *new_skb; 4005 struct hns3_desc_cb *desc_cb; 4006 struct hns3_desc *desc; 4007 u32 bd_base_info; 4008 4009 do { 4010 desc = &ring->desc[ring->next_to_clean]; 4011 desc_cb = &ring->desc_cb[ring->next_to_clean]; 4012 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 4013 /* make sure HW write desc complete */ 4014 dma_rmb(); 4015 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B))) 4016 return -ENXIO; 4017 4018 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { 4019 new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0); 4020 if (unlikely(!new_skb)) { 4021 hns3_rl_err(ring_to_netdev(ring), 4022 "alloc rx fraglist skb fail\n"); 
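/* -ENXIO keeps ring->skb and ring->pending_buf untouched in
 * hns3_clean_rx_ring(), so this packet is retried on the next poll
 * instead of being dropped.
 */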
4023 return -ENXIO; 4024 } 4025 4026 if (ring->page_pool) 4027 skb_mark_for_recycle(new_skb); 4028 4029 ring->frag_num = 0; 4030 4031 if (ring->tail_skb) { 4032 ring->tail_skb->next = new_skb; 4033 ring->tail_skb = new_skb; 4034 } else { 4035 skb_shinfo(skb)->frag_list = new_skb; 4036 ring->tail_skb = new_skb; 4037 } 4038 } 4039 4040 if (ring->tail_skb) { 4041 head_skb->truesize += hns3_buf_size(ring); 4042 head_skb->data_len += le16_to_cpu(desc->rx.size); 4043 head_skb->len += le16_to_cpu(desc->rx.size); 4044 skb = ring->tail_skb; 4045 } 4046 4047 dma_sync_single_for_cpu(ring_to_dev(ring), 4048 desc_cb->dma + desc_cb->page_offset, 4049 hns3_buf_size(ring), 4050 DMA_FROM_DEVICE); 4051 4052 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); 4053 trace_hns3_rx_desc(ring); 4054 hns3_rx_ring_move_fw(ring); 4055 ring->pending_buf++; 4056 } while (!(bd_base_info & BIT(HNS3_RXD_FE_B))); 4057 4058 return 0; 4059 } 4060 4061 static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, 4062 struct sk_buff *skb, u32 l234info, 4063 u32 bd_base_info, u32 ol_info, u16 csum) 4064 { 4065 struct net_device *netdev = ring_to_netdev(ring); 4066 struct hns3_nic_priv *priv = netdev_priv(netdev); 4067 u32 l3_type; 4068 4069 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, 4070 HNS3_RXD_GRO_SIZE_M, 4071 HNS3_RXD_GRO_SIZE_S); 4072 /* if there is no HW GRO, do not set gro params */ 4073 if (!skb_shinfo(skb)->gso_size) { 4074 hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info, 4075 csum); 4076 return 0; 4077 } 4078 4079 NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info, 4080 HNS3_RXD_GRO_COUNT_M, 4081 HNS3_RXD_GRO_COUNT_S); 4082 4083 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { 4084 u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M, 4085 HNS3_RXD_PTYPE_S); 4086 4087 l3_type = hns3_rx_ptype_tbl[ptype].l3_type; 4088 } else { 4089 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, 4090 HNS3_RXD_L3ID_S); 4091 } 4092 4093 if (l3_type == HNS3_L3_TYPE_IPV4) 4094 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 4095 else if (l3_type == HNS3_L3_TYPE_IPV6) 4096 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 4097 else 4098 return -EFAULT; 4099 4100 return hns3_gro_complete(skb, l234info); 4101 } 4102 4103 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, 4104 struct sk_buff *skb, u32 rss_hash) 4105 { 4106 struct hnae3_handle *handle = ring->tqp->handle; 4107 enum pkt_hash_types rss_type; 4108 4109 if (rss_hash) 4110 rss_type = handle->kinfo.rss_type; 4111 else 4112 rss_type = PKT_HASH_TYPE_NONE; 4113 4114 skb_set_hash(skb, rss_hash, rss_type); 4115 } 4116 4117 static void hns3_handle_rx_ts_info(struct net_device *netdev, 4118 struct hns3_desc *desc, struct sk_buff *skb, 4119 u32 bd_base_info) 4120 { 4121 if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) { 4122 struct hnae3_handle *h = hns3_get_handle(netdev); 4123 u32 nsec = le32_to_cpu(desc->ts_nsec); 4124 u32 sec = le32_to_cpu(desc->ts_sec); 4125 4126 if (h->ae_algo->ops->get_rx_hwts) 4127 h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec); 4128 } 4129 } 4130 4131 static void hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring, 4132 struct hns3_desc *desc, struct sk_buff *skb, 4133 u32 l234info) 4134 { 4135 struct net_device *netdev = ring_to_netdev(ring); 4136 4137 /* Based on hw strategy, the tag offloaded will be stored at 4138 * ot_vlan_tag in two layer tag case, and stored at vlan_tag 4139 * in one layer tag case. 
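 * hns3_parse_vlan_tag() below picks which of the two fields to
 * report to the stack.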
4140 */ 4141 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { 4142 u16 vlan_tag; 4143 4144 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) 4145 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 4146 vlan_tag); 4147 } 4148 } 4149 4150 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) 4151 { 4152 struct net_device *netdev = ring_to_netdev(ring); 4153 enum hns3_pkt_l2t_type l2_frame_type; 4154 u32 bd_base_info, l234info, ol_info; 4155 struct hns3_desc *desc; 4156 unsigned int len; 4157 int pre_ntc, ret; 4158 u16 csum; 4159 4160 /* bdinfo handled below is only valid on the last BD of the 4161 * current packet, and ring->next_to_clean indicates the first 4162 * descriptor of next packet, so need - 1 below. 4163 */ 4164 pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : 4165 (ring->desc_num - 1); 4166 desc = &ring->desc[pre_ntc]; 4167 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 4168 l234info = le32_to_cpu(desc->rx.l234_info); 4169 ol_info = le32_to_cpu(desc->rx.ol_info); 4170 csum = le16_to_cpu(desc->csum); 4171 4172 hns3_handle_rx_ts_info(netdev, desc, skb, bd_base_info); 4173 4174 hns3_handle_rx_vlan_tag(ring, desc, skb, l234info); 4175 4176 if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | 4177 BIT(HNS3_RXD_L2E_B))))) { 4178 u64_stats_update_begin(&ring->syncp); 4179 if (l234info & BIT(HNS3_RXD_L2E_B)) 4180 ring->stats.l2_err++; 4181 else 4182 ring->stats.err_pkt_len++; 4183 u64_stats_update_end(&ring->syncp); 4184 4185 return -EFAULT; 4186 } 4187 4188 len = skb->len; 4189 4190 /* Do update ip stack process */ 4191 skb->protocol = eth_type_trans(skb, netdev); 4192 4193 /* This is needed in order to enable forwarding support */ 4194 ret = hns3_set_gro_and_checksum(ring, skb, l234info, 4195 bd_base_info, ol_info, csum); 4196 if (unlikely(ret)) { 4197 hns3_ring_stats_update(ring, rx_err_cnt); 4198 return ret; 4199 } 4200 4201 l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, 4202 HNS3_RXD_DMAC_S); 4203 4204 u64_stats_update_begin(&ring->syncp); 4205 ring->stats.rx_pkts++; 4206 ring->stats.rx_bytes += len; 4207 4208 if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) 4209 ring->stats.rx_multicast++; 4210 4211 u64_stats_update_end(&ring->syncp); 4212 4213 ring->tqp_vector->rx_group.total_bytes += len; 4214 4215 hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); 4216 return 0; 4217 } 4218 4219 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) 4220 { 4221 struct sk_buff *skb = ring->skb; 4222 struct hns3_desc_cb *desc_cb; 4223 struct hns3_desc *desc; 4224 unsigned int length; 4225 u32 bd_base_info; 4226 int ret; 4227 4228 desc = &ring->desc[ring->next_to_clean]; 4229 desc_cb = &ring->desc_cb[ring->next_to_clean]; 4230 4231 prefetch(desc); 4232 4233 if (!skb) { 4234 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 4235 /* Check valid BD */ 4236 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) 4237 return -ENXIO; 4238 4239 dma_rmb(); 4240 length = le16_to_cpu(desc->rx.size); 4241 4242 ring->va = desc_cb->buf + desc_cb->page_offset; 4243 4244 dma_sync_single_for_cpu(ring_to_dev(ring), 4245 desc_cb->dma + desc_cb->page_offset, 4246 hns3_buf_size(ring), 4247 DMA_FROM_DEVICE); 4248 4249 /* Prefetch first cache line of first page. 4250 * Idea is to cache few bytes of the header of the packet. 4251 * Our L1 Cache line size is 64B so need to prefetch twice to make 4252 * it 128B. But in actual we can have greater size of caches with 4253 * 128B Level 1 cache lines. 
In such a case, single fetch would 4254 * suffice to cache in the relevant part of the header. 4255 */ 4256 net_prefetch(ring->va); 4257 4258 ret = hns3_alloc_skb(ring, length, ring->va); 4259 skb = ring->skb; 4260 4261 if (ret < 0) /* alloc buffer fail */ 4262 return ret; 4263 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */ 4264 ret = hns3_add_frag(ring); 4265 if (ret) 4266 return ret; 4267 } 4268 } else { 4269 ret = hns3_add_frag(ring); 4270 if (ret) 4271 return ret; 4272 } 4273 4274 /* As the head data may be changed when GRO enable, copy 4275 * the head data in after other data rx completed 4276 */ 4277 if (skb->len > HNS3_RX_HEAD_SIZE) 4278 memcpy(skb->data, ring->va, 4279 ALIGN(ring->pull_len, sizeof(long))); 4280 4281 ret = hns3_handle_bdinfo(ring, skb); 4282 if (unlikely(ret)) { 4283 dev_kfree_skb_any(skb); 4284 return ret; 4285 } 4286 4287 skb_record_rx_queue(skb, ring->tqp->tqp_index); 4288 return 0; 4289 } 4290 4291 int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, 4292 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) 4293 { 4294 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 4295 int unused_count = hns3_desc_unused(ring); 4296 bool failure = false; 4297 int recv_pkts = 0; 4298 int err; 4299 4300 unused_count -= ring->pending_buf; 4301 4302 while (recv_pkts < budget) { 4303 /* Reuse or realloc buffers */ 4304 if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { 4305 failure = failure || 4306 hns3_nic_alloc_rx_buffers(ring, unused_count); 4307 unused_count = 0; 4308 } 4309 4310 /* Poll one pkt */ 4311 err = hns3_handle_rx_bd(ring); 4312 /* Do not get FE for the packet or failed to alloc skb */ 4313 if (unlikely(!ring->skb || err == -ENXIO)) { 4314 goto out; 4315 } else if (likely(!err)) { 4316 rx_fn(ring, ring->skb); 4317 recv_pkts++; 4318 } 4319 4320 unused_count += ring->pending_buf; 4321 ring->skb = NULL; 4322 ring->pending_buf = 0; 4323 } 4324 4325 out: 4326 /* sync head pointer before exiting, since hardware will calculate 4327 * FBD number with head pointer 4328 */ 4329 if (unused_count > 0) 4330 failure = failure || 4331 hns3_nic_alloc_rx_buffers(ring, unused_count); 4332 4333 return failure ? 
budget : recv_pkts; 4334 } 4335 4336 static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector) 4337 { 4338 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; 4339 struct dim_sample sample = {}; 4340 4341 if (!rx_group->coal.adapt_enable) 4342 return; 4343 4344 dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets, 4345 rx_group->total_bytes, &sample); 4346 net_dim(&rx_group->dim, sample); 4347 } 4348 4349 static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector) 4350 { 4351 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; 4352 struct dim_sample sample = {}; 4353 4354 if (!tx_group->coal.adapt_enable) 4355 return; 4356 4357 dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets, 4358 tx_group->total_bytes, &sample); 4359 net_dim(&tx_group->dim, sample); 4360 } 4361 4362 static int hns3_nic_common_poll(struct napi_struct *napi, int budget) 4363 { 4364 struct hns3_nic_priv *priv = netdev_priv(napi->dev); 4365 struct hns3_enet_ring *ring; 4366 int rx_pkt_total = 0; 4367 4368 struct hns3_enet_tqp_vector *tqp_vector = 4369 container_of(napi, struct hns3_enet_tqp_vector, napi); 4370 bool clean_complete = true; 4371 int rx_budget = budget; 4372 4373 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { 4374 napi_complete(napi); 4375 return 0; 4376 } 4377 4378 /* Since the actual Tx work is minimal, we can give the Tx a larger 4379 * budget and be more aggressive about cleaning up the Tx descriptors. 4380 */ 4381 hns3_for_each_ring(ring, tqp_vector->tx_group) 4382 hns3_clean_tx_ring(ring, budget); 4383 4384 /* make sure rx ring budget not smaller than 1 */ 4385 if (tqp_vector->num_tqps > 1) 4386 rx_budget = max(budget / tqp_vector->num_tqps, 1); 4387 4388 hns3_for_each_ring(ring, tqp_vector->rx_group) { 4389 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, 4390 hns3_rx_skb); 4391 if (rx_cleaned >= rx_budget) 4392 clean_complete = false; 4393 4394 rx_pkt_total += rx_cleaned; 4395 } 4396 4397 tqp_vector->rx_group.total_packets += rx_pkt_total; 4398 4399 if (!clean_complete) 4400 return budget; 4401 4402 if (napi_complete(napi) && 4403 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { 4404 hns3_update_rx_int_coalesce(tqp_vector); 4405 hns3_update_tx_int_coalesce(tqp_vector); 4406 4407 hns3_mask_vector_irq(tqp_vector, 1); 4408 } 4409 4410 return rx_pkt_total; 4411 } 4412 4413 static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 4414 struct hnae3_ring_chain_node **head, 4415 bool is_tx) 4416 { 4417 u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX; 4418 u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX; 4419 struct hnae3_ring_chain_node *cur_chain = *head; 4420 struct pci_dev *pdev = tqp_vector->handle->pdev; 4421 struct hnae3_ring_chain_node *chain; 4422 struct hns3_enet_ring *ring; 4423 4424 ring = is_tx ? 
tqp_vector->tx_group.ring : tqp_vector->rx_group.ring; 4425 4426 if (cur_chain) { 4427 while (cur_chain->next) 4428 cur_chain = cur_chain->next; 4429 } 4430 4431 while (ring) { 4432 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); 4433 if (!chain) 4434 return -ENOMEM; 4435 if (cur_chain) 4436 cur_chain->next = chain; 4437 else 4438 *head = chain; 4439 chain->tqp_index = ring->tqp->tqp_index; 4440 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, 4441 bit_value); 4442 hnae3_set_field(chain->int_gl_idx, 4443 HNAE3_RING_GL_IDX_M, 4444 HNAE3_RING_GL_IDX_S, field_value); 4445 4446 cur_chain = chain; 4447 4448 ring = ring->next; 4449 } 4450 4451 return 0; 4452 } 4453 4454 static struct hnae3_ring_chain_node * 4455 hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector) 4456 { 4457 struct pci_dev *pdev = tqp_vector->handle->pdev; 4458 struct hnae3_ring_chain_node *cur_chain = NULL; 4459 struct hnae3_ring_chain_node *chain; 4460 4461 if (hns3_create_ring_chain(tqp_vector, &cur_chain, true)) 4462 goto err_free_chain; 4463 4464 if (hns3_create_ring_chain(tqp_vector, &cur_chain, false)) 4465 goto err_free_chain; 4466 4467 return cur_chain; 4468 4469 err_free_chain: 4470 while (cur_chain) { 4471 chain = cur_chain->next; 4472 devm_kfree(&pdev->dev, cur_chain); 4473 cur_chain = chain; 4474 } 4475 4476 return NULL; 4477 } 4478 4479 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 4480 struct hnae3_ring_chain_node *head) 4481 { 4482 struct pci_dev *pdev = tqp_vector->handle->pdev; 4483 struct hnae3_ring_chain_node *chain_tmp, *chain; 4484 4485 chain = head; 4486 4487 while (chain) { 4488 chain_tmp = chain->next; 4489 devm_kfree(&pdev->dev, chain); 4490 chain = chain_tmp; 4491 } 4492 } 4493 4494 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, 4495 struct hns3_enet_ring *ring) 4496 { 4497 ring->next = group->ring; 4498 group->ring = ring; 4499 4500 group->count++; 4501 } 4502 4503 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) 4504 { 4505 struct pci_dev *pdev = priv->ae_handle->pdev; 4506 struct hns3_enet_tqp_vector *tqp_vector; 4507 int num_vectors = priv->vector_num; 4508 int numa_node; 4509 int vector_i; 4510 4511 numa_node = dev_to_node(&pdev->dev); 4512 4513 for (vector_i = 0; vector_i < num_vectors; vector_i++) { 4514 tqp_vector = &priv->tqp_vector[vector_i]; 4515 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), 4516 &tqp_vector->affinity_mask); 4517 } 4518 } 4519 4520 static void hns3_rx_dim_work(struct work_struct *work) 4521 { 4522 struct dim *dim = container_of(work, struct dim, work); 4523 struct hns3_enet_ring_group *group = container_of(dim, 4524 struct hns3_enet_ring_group, dim); 4525 struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector; 4526 struct dim_cq_moder cur_moder = 4527 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 4528 4529 hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec); 4530 tqp_vector->rx_group.coal.int_gl = cur_moder.usec; 4531 4532 if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) { 4533 hns3_set_vector_coalesce_rx_ql(tqp_vector, cur_moder.pkts); 4534 tqp_vector->rx_group.coal.int_ql = cur_moder.pkts; 4535 } 4536 4537 dim->state = DIM_START_MEASURE; 4538 } 4539 4540 static void hns3_tx_dim_work(struct work_struct *work) 4541 { 4542 struct dim *dim = container_of(work, struct dim, work); 4543 struct hns3_enet_ring_group *group = container_of(dim, 4544 struct hns3_enet_ring_group, dim); 4545 struct hns3_enet_tqp_vector 
*tqp_vector = group->ring->tqp_vector; 4546 struct dim_cq_moder cur_moder = 4547 net_dim_get_tx_moderation(dim->mode, dim->profile_ix); 4548 4549 hns3_set_vector_coalesce_tx_gl(tqp_vector, cur_moder.usec); 4550 tqp_vector->tx_group.coal.int_gl = cur_moder.usec; 4551 4552 if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) { 4553 hns3_set_vector_coalesce_tx_ql(tqp_vector, cur_moder.pkts); 4554 tqp_vector->tx_group.coal.int_ql = cur_moder.pkts; 4555 } 4556 4557 dim->state = DIM_START_MEASURE; 4558 } 4559 4560 static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector) 4561 { 4562 INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work); 4563 INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work); 4564 } 4565 4566 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) 4567 { 4568 struct hnae3_handle *h = priv->ae_handle; 4569 struct hns3_enet_tqp_vector *tqp_vector; 4570 int ret; 4571 int i; 4572 4573 hns3_nic_set_cpumask(priv); 4574 4575 for (i = 0; i < priv->vector_num; i++) { 4576 tqp_vector = &priv->tqp_vector[i]; 4577 hns3_vector_coalesce_init_hw(tqp_vector, priv); 4578 tqp_vector->num_tqps = 0; 4579 hns3_nic_init_dim(tqp_vector); 4580 } 4581 4582 for (i = 0; i < h->kinfo.num_tqps; i++) { 4583 u16 vector_i = i % priv->vector_num; 4584 u16 tqp_num = h->kinfo.num_tqps; 4585 4586 tqp_vector = &priv->tqp_vector[vector_i]; 4587 4588 hns3_add_ring_to_group(&tqp_vector->tx_group, 4589 &priv->ring[i]); 4590 4591 hns3_add_ring_to_group(&tqp_vector->rx_group, 4592 &priv->ring[i + tqp_num]); 4593 4594 priv->ring[i].tqp_vector = tqp_vector; 4595 priv->ring[i + tqp_num].tqp_vector = tqp_vector; 4596 tqp_vector->num_tqps++; 4597 } 4598 4599 for (i = 0; i < priv->vector_num; i++) { 4600 struct hnae3_ring_chain_node *vector_ring_chain; 4601 4602 tqp_vector = &priv->tqp_vector[i]; 4603 4604 tqp_vector->rx_group.total_bytes = 0; 4605 tqp_vector->rx_group.total_packets = 0; 4606 tqp_vector->tx_group.total_bytes = 0; 4607 tqp_vector->tx_group.total_packets = 0; 4608 tqp_vector->handle = h; 4609 4610 vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector); 4611 if (!vector_ring_chain) { 4612 ret = -ENOMEM; 4613 goto map_ring_fail; 4614 } 4615 4616 ret = h->ae_algo->ops->map_ring_to_vector(h, 4617 tqp_vector->vector_irq, vector_ring_chain); 4618 4619 hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain); 4620 4621 if (ret) 4622 goto map_ring_fail; 4623 4624 netif_napi_add(priv->netdev, &tqp_vector->napi, 4625 hns3_nic_common_poll, NAPI_POLL_WEIGHT); 4626 } 4627 4628 return 0; 4629 4630 map_ring_fail: 4631 while (i--) 4632 netif_napi_del(&priv->tqp_vector[i].napi); 4633 4634 return ret; 4635 } 4636 4637 static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv) 4638 { 4639 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); 4640 struct hns3_enet_coalesce *tx_coal = &priv->tx_coal; 4641 struct hns3_enet_coalesce *rx_coal = &priv->rx_coal; 4642 4643 /* initialize the configuration for interrupt coalescing. 4644 * 1. GL (Interrupt Gap Limiter) 4645 * 2. RL (Interrupt Rate Limiter) 4646 * 3. 
QL (Interrupt Quantity Limiter) 4647 * 4648 * Default: enable interrupt coalescing self-adaptive and GL 4649 */ 4650 tx_coal->adapt_enable = 1; 4651 rx_coal->adapt_enable = 1; 4652 4653 tx_coal->int_gl = HNS3_INT_GL_50K; 4654 rx_coal->int_gl = HNS3_INT_GL_50K; 4655 4656 rx_coal->flow_level = HNS3_FLOW_LOW; 4657 tx_coal->flow_level = HNS3_FLOW_LOW; 4658 4659 if (ae_dev->dev_specs.int_ql_max) { 4660 tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; 4661 rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; 4662 } 4663 } 4664 4665 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) 4666 { 4667 struct hnae3_handle *h = priv->ae_handle; 4668 struct hns3_enet_tqp_vector *tqp_vector; 4669 struct hnae3_vector_info *vector; 4670 struct pci_dev *pdev = h->pdev; 4671 u16 tqp_num = h->kinfo.num_tqps; 4672 u16 vector_num; 4673 int ret = 0; 4674 u16 i; 4675 4676 /* RSS size, cpu online and vector_num should be the same */ 4677 /* Should consider 2p/4p later */ 4678 vector_num = min_t(u16, num_online_cpus(), tqp_num); 4679 4680 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), 4681 GFP_KERNEL); 4682 if (!vector) 4683 return -ENOMEM; 4684 4685 /* save the actual available vector number */ 4686 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); 4687 4688 priv->vector_num = vector_num; 4689 priv->tqp_vector = (struct hns3_enet_tqp_vector *) 4690 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), 4691 GFP_KERNEL); 4692 if (!priv->tqp_vector) { 4693 ret = -ENOMEM; 4694 goto out; 4695 } 4696 4697 for (i = 0; i < priv->vector_num; i++) { 4698 tqp_vector = &priv->tqp_vector[i]; 4699 tqp_vector->idx = i; 4700 tqp_vector->mask_addr = vector[i].io_addr; 4701 tqp_vector->vector_irq = vector[i].vector; 4702 hns3_vector_coalesce_init(tqp_vector, priv); 4703 } 4704 4705 out: 4706 devm_kfree(&pdev->dev, vector); 4707 return ret; 4708 } 4709 4710 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) 4711 { 4712 group->ring = NULL; 4713 group->count = 0; 4714 } 4715 4716 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) 4717 { 4718 struct hnae3_ring_chain_node *vector_ring_chain; 4719 struct hnae3_handle *h = priv->ae_handle; 4720 struct hns3_enet_tqp_vector *tqp_vector; 4721 int i; 4722 4723 for (i = 0; i < priv->vector_num; i++) { 4724 tqp_vector = &priv->tqp_vector[i]; 4725 4726 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) 4727 continue; 4728 4729 /* Since the mapping can be overwritten, when fail to get the 4730 * chain between vector and ring, we should go on to deal with 4731 * the remaining options. 
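 * unmap_ring_from_vector() is still called (possibly with a NULL
 * chain), and the ring groups and NAPI context are torn down
 * regardless.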
4732 */ 4733 vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector); 4734 if (!vector_ring_chain) 4735 dev_warn(priv->dev, "failed to get ring chain\n"); 4736 4737 h->ae_algo->ops->unmap_ring_from_vector(h, 4738 tqp_vector->vector_irq, vector_ring_chain); 4739 4740 hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain); 4741 4742 hns3_clear_ring_group(&tqp_vector->rx_group); 4743 hns3_clear_ring_group(&tqp_vector->tx_group); 4744 netif_napi_del(&priv->tqp_vector[i].napi); 4745 } 4746 } 4747 4748 static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) 4749 { 4750 struct hnae3_handle *h = priv->ae_handle; 4751 struct pci_dev *pdev = h->pdev; 4752 int i, ret; 4753 4754 for (i = 0; i < priv->vector_num; i++) { 4755 struct hns3_enet_tqp_vector *tqp_vector; 4756 4757 tqp_vector = &priv->tqp_vector[i]; 4758 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); 4759 if (ret) 4760 return; 4761 } 4762 4763 devm_kfree(&pdev->dev, priv->tqp_vector); 4764 } 4765 4766 static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, 4767 unsigned int ring_type) 4768 { 4769 int queue_num = priv->ae_handle->kinfo.num_tqps; 4770 struct hns3_enet_ring *ring; 4771 int desc_num; 4772 4773 if (ring_type == HNAE3_RING_TYPE_TX) { 4774 ring = &priv->ring[q->tqp_index]; 4775 desc_num = priv->ae_handle->kinfo.num_tx_desc; 4776 ring->queue_index = q->tqp_index; 4777 ring->tx_copybreak = priv->tx_copybreak; 4778 ring->last_to_use = 0; 4779 } else { 4780 ring = &priv->ring[q->tqp_index + queue_num]; 4781 desc_num = priv->ae_handle->kinfo.num_rx_desc; 4782 ring->queue_index = q->tqp_index; 4783 ring->rx_copybreak = priv->rx_copybreak; 4784 } 4785 4786 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); 4787 4788 ring->tqp = q; 4789 ring->desc = NULL; 4790 ring->desc_cb = NULL; 4791 ring->dev = priv->dev; 4792 ring->desc_dma_addr = 0; 4793 ring->buf_size = q->buf_size; 4794 ring->desc_num = desc_num; 4795 ring->next_to_use = 0; 4796 ring->next_to_clean = 0; 4797 } 4798 4799 static void hns3_queue_to_ring(struct hnae3_queue *tqp, 4800 struct hns3_nic_priv *priv) 4801 { 4802 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); 4803 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); 4804 } 4805 4806 static int hns3_get_ring_config(struct hns3_nic_priv *priv) 4807 { 4808 struct hnae3_handle *h = priv->ae_handle; 4809 struct pci_dev *pdev = h->pdev; 4810 int i; 4811 4812 priv->ring = devm_kzalloc(&pdev->dev, 4813 array3_size(h->kinfo.num_tqps, 4814 sizeof(*priv->ring), 2), 4815 GFP_KERNEL); 4816 if (!priv->ring) 4817 return -ENOMEM; 4818 4819 for (i = 0; i < h->kinfo.num_tqps; i++) 4820 hns3_queue_to_ring(h->kinfo.tqp[i], priv); 4821 4822 return 0; 4823 } 4824 4825 static void hns3_put_ring_config(struct hns3_nic_priv *priv) 4826 { 4827 if (!priv->ring) 4828 return; 4829 4830 devm_kfree(priv->dev, priv->ring); 4831 priv->ring = NULL; 4832 } 4833 4834 static void hns3_alloc_page_pool(struct hns3_enet_ring *ring) 4835 { 4836 struct page_pool_params pp_params = { 4837 .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG | 4838 PP_FLAG_DMA_SYNC_DEV, 4839 .order = hns3_page_order(ring), 4840 .pool_size = ring->desc_num * hns3_buf_size(ring) / 4841 (PAGE_SIZE << hns3_page_order(ring)), 4842 .nid = dev_to_node(ring_to_dev(ring)), 4843 .dev = ring_to_dev(ring), 4844 .dma_dir = DMA_FROM_DEVICE, 4845 .offset = 0, 4846 .max_len = PAGE_SIZE << hns3_page_order(ring), 4847 }; 4848 4849 ring->page_pool = page_pool_create(&pp_params); 4850 if (IS_ERR(ring->page_pool)) { 4851 dev_warn(ring_to_dev(ring), 
"page pool creation failed: %ld\n", 4852 PTR_ERR(ring->page_pool)); 4853 ring->page_pool = NULL; 4854 } 4855 } 4856 4857 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) 4858 { 4859 int ret; 4860 4861 if (ring->desc_num <= 0 || ring->buf_size <= 0) 4862 return -EINVAL; 4863 4864 ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, 4865 sizeof(ring->desc_cb[0]), GFP_KERNEL); 4866 if (!ring->desc_cb) { 4867 ret = -ENOMEM; 4868 goto out; 4869 } 4870 4871 ret = hns3_alloc_desc(ring); 4872 if (ret) 4873 goto out_with_desc_cb; 4874 4875 if (!HNAE3_IS_TX_RING(ring)) { 4876 if (page_pool_enabled) 4877 hns3_alloc_page_pool(ring); 4878 4879 ret = hns3_alloc_ring_buffers(ring); 4880 if (ret) 4881 goto out_with_desc; 4882 } else { 4883 hns3_init_tx_spare_buffer(ring); 4884 } 4885 4886 return 0; 4887 4888 out_with_desc: 4889 hns3_free_desc(ring); 4890 out_with_desc_cb: 4891 devm_kfree(ring_to_dev(ring), ring->desc_cb); 4892 ring->desc_cb = NULL; 4893 out: 4894 return ret; 4895 } 4896 4897 void hns3_fini_ring(struct hns3_enet_ring *ring) 4898 { 4899 hns3_free_desc(ring); 4900 devm_kfree(ring_to_dev(ring), ring->desc_cb); 4901 ring->desc_cb = NULL; 4902 ring->next_to_clean = 0; 4903 ring->next_to_use = 0; 4904 ring->last_to_use = 0; 4905 ring->pending_buf = 0; 4906 if (!HNAE3_IS_TX_RING(ring) && ring->skb) { 4907 dev_kfree_skb_any(ring->skb); 4908 ring->skb = NULL; 4909 } else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) { 4910 struct hns3_tx_spare *tx_spare = ring->tx_spare; 4911 4912 dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len, 4913 DMA_TO_DEVICE); 4914 free_pages((unsigned long)tx_spare->buf, 4915 get_order(tx_spare->len)); 4916 devm_kfree(ring_to_dev(ring), tx_spare); 4917 ring->tx_spare = NULL; 4918 } 4919 4920 if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) { 4921 page_pool_destroy(ring->page_pool); 4922 ring->page_pool = NULL; 4923 } 4924 } 4925 4926 static int hns3_buf_size2type(u32 buf_size) 4927 { 4928 int bd_size_type; 4929 4930 switch (buf_size) { 4931 case 512: 4932 bd_size_type = HNS3_BD_SIZE_512_TYPE; 4933 break; 4934 case 1024: 4935 bd_size_type = HNS3_BD_SIZE_1024_TYPE; 4936 break; 4937 case 2048: 4938 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 4939 break; 4940 case 4096: 4941 bd_size_type = HNS3_BD_SIZE_4096_TYPE; 4942 break; 4943 default: 4944 bd_size_type = HNS3_BD_SIZE_2048_TYPE; 4945 } 4946 4947 return bd_size_type; 4948 } 4949 4950 static void hns3_init_ring_hw(struct hns3_enet_ring *ring) 4951 { 4952 dma_addr_t dma = ring->desc_dma_addr; 4953 struct hnae3_queue *q = ring->tqp; 4954 4955 if (!HNAE3_IS_TX_RING(ring)) { 4956 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma); 4957 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, 4958 (u32)((dma >> 31) >> 1)); 4959 4960 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, 4961 hns3_buf_size2type(ring->buf_size)); 4962 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, 4963 ring->desc_num / 8 - 1); 4964 } else { 4965 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, 4966 (u32)dma); 4967 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, 4968 (u32)((dma >> 31) >> 1)); 4969 4970 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, 4971 ring->desc_num / 8 - 1); 4972 } 4973 } 4974 4975 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) 4976 { 4977 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 4978 struct hnae3_tc_info *tc_info = &kinfo->tc_info; 4979 int i; 4980 4981 for (i = 0; i < tc_info->num_tc; i++) { 4982 int j; 4983 4984 for (j = 0; j < tc_info->tqp_count[i]; 
j++) { 4985 struct hnae3_queue *q; 4986 4987 q = priv->ring[tc_info->tqp_offset[i] + j].tqp; 4988 hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i); 4989 } 4990 } 4991 } 4992 4993 int hns3_init_all_ring(struct hns3_nic_priv *priv) 4994 { 4995 struct hnae3_handle *h = priv->ae_handle; 4996 int ring_num = h->kinfo.num_tqps * 2; 4997 int i, j; 4998 int ret; 4999 5000 for (i = 0; i < ring_num; i++) { 5001 ret = hns3_alloc_ring_memory(&priv->ring[i]); 5002 if (ret) { 5003 dev_err(priv->dev, 5004 "Alloc ring memory fail! ret=%d\n", ret); 5005 goto out_when_alloc_ring_memory; 5006 } 5007 5008 u64_stats_init(&priv->ring[i].syncp); 5009 } 5010 5011 return 0; 5012 5013 out_when_alloc_ring_memory: 5014 for (j = i - 1; j >= 0; j--) 5015 hns3_fini_ring(&priv->ring[j]); 5016 5017 return -ENOMEM; 5018 } 5019 5020 static void hns3_uninit_all_ring(struct hns3_nic_priv *priv) 5021 { 5022 struct hnae3_handle *h = priv->ae_handle; 5023 int i; 5024 5025 for (i = 0; i < h->kinfo.num_tqps; i++) { 5026 hns3_fini_ring(&priv->ring[i]); 5027 hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]); 5028 } 5029 } 5030 5031 /* Set mac addr if it is configured. or leave it to the AE driver */ 5032 static int hns3_init_mac_addr(struct net_device *netdev) 5033 { 5034 struct hns3_nic_priv *priv = netdev_priv(netdev); 5035 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 5036 struct hnae3_handle *h = priv->ae_handle; 5037 u8 mac_addr_temp[ETH_ALEN]; 5038 int ret = 0; 5039 5040 if (h->ae_algo->ops->get_mac_addr) 5041 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); 5042 5043 /* Check if the MAC address is valid, if not get a random one */ 5044 if (!is_valid_ether_addr(mac_addr_temp)) { 5045 eth_hw_addr_random(netdev); 5046 hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr); 5047 dev_warn(priv->dev, "using random MAC address %s\n", 5048 format_mac_addr); 5049 } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { 5050 eth_hw_addr_set(netdev, mac_addr_temp); 5051 ether_addr_copy(netdev->perm_addr, mac_addr_temp); 5052 } else { 5053 return 0; 5054 } 5055 5056 if (h->ae_algo->ops->set_mac_addr) 5057 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); 5058 5059 return ret; 5060 } 5061 5062 static int hns3_init_phy(struct net_device *netdev) 5063 { 5064 struct hnae3_handle *h = hns3_get_handle(netdev); 5065 int ret = 0; 5066 5067 if (h->ae_algo->ops->mac_connect_phy) 5068 ret = h->ae_algo->ops->mac_connect_phy(h); 5069 5070 return ret; 5071 } 5072 5073 static void hns3_uninit_phy(struct net_device *netdev) 5074 { 5075 struct hnae3_handle *h = hns3_get_handle(netdev); 5076 5077 if (h->ae_algo->ops->mac_disconnect_phy) 5078 h->ae_algo->ops->mac_disconnect_phy(h); 5079 } 5080 5081 static int hns3_client_start(struct hnae3_handle *handle) 5082 { 5083 if (!handle->ae_algo->ops->client_start) 5084 return 0; 5085 5086 return handle->ae_algo->ops->client_start(handle); 5087 } 5088 5089 static void hns3_client_stop(struct hnae3_handle *handle) 5090 { 5091 if (!handle->ae_algo->ops->client_stop) 5092 return; 5093 5094 handle->ae_algo->ops->client_stop(handle); 5095 } 5096 5097 static void hns3_info_show(struct hns3_nic_priv *priv) 5098 { 5099 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 5100 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 5101 5102 hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr); 5103 dev_info(priv->dev, "MAC address: %s\n", format_mac_addr); 5104 dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); 5105 dev_info(priv->dev, "RSS size: %u\n", 
kinfo->rss_size); 5106 dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); 5107 dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); 5108 dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); 5109 dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); 5110 dev_info(priv->dev, "Total number of enabled TCs: %u\n", 5111 kinfo->tc_info.num_tc); 5112 dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); 5113 } 5114 5115 static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv, 5116 enum dim_cq_period_mode mode, bool is_tx) 5117 { 5118 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); 5119 struct hnae3_handle *handle = priv->ae_handle; 5120 int i; 5121 5122 if (is_tx) { 5123 priv->tx_cqe_mode = mode; 5124 5125 for (i = 0; i < priv->vector_num; i++) 5126 priv->tqp_vector[i].tx_group.dim.mode = mode; 5127 } else { 5128 priv->rx_cqe_mode = mode; 5129 5130 for (i = 0; i < priv->vector_num; i++) 5131 priv->tqp_vector[i].rx_group.dim.mode = mode; 5132 } 5133 5134 /* only device version above V3(include V3), GL can switch CQ/EQ 5135 * period mode. 5136 */ 5137 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { 5138 u32 new_mode; 5139 u64 reg; 5140 5141 new_mode = (mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) ? 5142 HNS3_CQ_MODE_CQE : HNS3_CQ_MODE_EQE; 5143 reg = is_tx ? HNS3_GL1_CQ_MODE_REG : HNS3_GL0_CQ_MODE_REG; 5144 5145 writel(new_mode, handle->kinfo.io_base + reg); 5146 } 5147 } 5148 5149 void hns3_cq_period_mode_init(struct hns3_nic_priv *priv, 5150 enum dim_cq_period_mode tx_mode, 5151 enum dim_cq_period_mode rx_mode) 5152 { 5153 hns3_set_cq_period_mode(priv, tx_mode, true); 5154 hns3_set_cq_period_mode(priv, rx_mode, false); 5155 } 5156 5157 static void hns3_state_init(struct hnae3_handle *handle) 5158 { 5159 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 5160 struct net_device *netdev = handle->kinfo.netdev; 5161 struct hns3_nic_priv *priv = netdev_priv(netdev); 5162 5163 set_bit(HNS3_NIC_STATE_INITED, &priv->state); 5164 5165 if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) 5166 set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state); 5167 5168 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) 5169 set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); 5170 5171 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) 5172 set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); 5173 5174 if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev)) 5175 set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); 5176 } 5177 5178 static int hns3_client_init(struct hnae3_handle *handle) 5179 { 5180 struct pci_dev *pdev = handle->pdev; 5181 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 5182 u16 alloc_tqps, max_rss_size; 5183 struct hns3_nic_priv *priv; 5184 struct net_device *netdev; 5185 int ret; 5186 5187 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, 5188 &max_rss_size); 5189 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); 5190 if (!netdev) 5191 return -ENOMEM; 5192 5193 priv = netdev_priv(netdev); 5194 priv->dev = &pdev->dev; 5195 priv->netdev = netdev; 5196 priv->ae_handle = handle; 5197 priv->tx_timeout_count = 0; 5198 priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num; 5199 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); 5200 5201 handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); 5202 5203 handle->kinfo.netdev = netdev; 5204 handle->priv = (void *)priv; 5205 5206 hns3_init_mac_addr(netdev); 5207 
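/* netdev features, ops and ethtool hooks must all be in place before
 * register_netdev() is called at the end of this function.
 */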
5208 hns3_set_default_feature(netdev); 5209 5210 netdev->watchdog_timeo = HNS3_TX_TIMEOUT; 5211 netdev->priv_flags |= IFF_UNICAST_FLT; 5212 netdev->netdev_ops = &hns3_nic_netdev_ops; 5213 SET_NETDEV_DEV(netdev, &pdev->dev); 5214 hns3_ethtool_set_ops(netdev); 5215 5216 /* Carrier off reporting is important to ethtool even BEFORE open */ 5217 netif_carrier_off(netdev); 5218 5219 ret = hns3_get_ring_config(priv); 5220 if (ret) { 5221 ret = -ENOMEM; 5222 goto out_get_ring_cfg; 5223 } 5224 5225 hns3_nic_init_coal_cfg(priv); 5226 5227 ret = hns3_nic_alloc_vector_data(priv); 5228 if (ret) { 5229 ret = -ENOMEM; 5230 goto out_alloc_vector_data; 5231 } 5232 5233 ret = hns3_nic_init_vector_data(priv); 5234 if (ret) { 5235 ret = -ENOMEM; 5236 goto out_init_vector_data; 5237 } 5238 5239 ret = hns3_init_all_ring(priv); 5240 if (ret) { 5241 ret = -ENOMEM; 5242 goto out_init_ring; 5243 } 5244 5245 hns3_cq_period_mode_init(priv, DIM_CQ_PERIOD_MODE_START_FROM_EQE, 5246 DIM_CQ_PERIOD_MODE_START_FROM_EQE); 5247 5248 ret = hns3_init_phy(netdev); 5249 if (ret) 5250 goto out_init_phy; 5251 5252 /* the device can work without cpu rmap, only aRFS needs it */ 5253 ret = hns3_set_rx_cpu_rmap(netdev); 5254 if (ret) 5255 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); 5256 5257 ret = hns3_nic_init_irq(priv); 5258 if (ret) { 5259 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); 5260 hns3_free_rx_cpu_rmap(netdev); 5261 goto out_init_irq_fail; 5262 } 5263 5264 ret = hns3_client_start(handle); 5265 if (ret) { 5266 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); 5267 goto out_client_start; 5268 } 5269 5270 hns3_dcbnl_setup(handle); 5271 5272 ret = hns3_dbg_init(handle); 5273 if (ret) { 5274 dev_err(priv->dev, "failed to init debugfs, ret = %d\n", 5275 ret); 5276 goto out_client_start; 5277 } 5278 5279 netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size); 5280 5281 hns3_state_init(handle); 5282 5283 ret = register_netdev(netdev); 5284 if (ret) { 5285 dev_err(priv->dev, "probe register netdev fail!\n"); 5286 goto out_reg_netdev_fail; 5287 } 5288 5289 if (netif_msg_drv(handle)) 5290 hns3_info_show(priv); 5291 5292 return ret; 5293 5294 out_reg_netdev_fail: 5295 hns3_dbg_uninit(handle); 5296 out_client_start: 5297 hns3_free_rx_cpu_rmap(netdev); 5298 hns3_nic_uninit_irq(priv); 5299 out_init_irq_fail: 5300 hns3_uninit_phy(netdev); 5301 out_init_phy: 5302 hns3_uninit_all_ring(priv); 5303 out_init_ring: 5304 hns3_nic_uninit_vector_data(priv); 5305 out_init_vector_data: 5306 hns3_nic_dealloc_vector_data(priv); 5307 out_alloc_vector_data: 5308 priv->ring = NULL; 5309 out_get_ring_cfg: 5310 priv->ae_handle = NULL; 5311 free_netdev(netdev); 5312 return ret; 5313 } 5314 5315 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) 5316 { 5317 struct net_device *netdev = handle->kinfo.netdev; 5318 struct hns3_nic_priv *priv = netdev_priv(netdev); 5319 5320 if (netdev->reg_state != NETREG_UNINITIALIZED) 5321 unregister_netdev(netdev); 5322 5323 hns3_client_stop(handle); 5324 5325 hns3_uninit_phy(netdev); 5326 5327 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 5328 netdev_warn(netdev, "already uninitialized\n"); 5329 goto out_netdev_free; 5330 } 5331 5332 hns3_free_rx_cpu_rmap(netdev); 5333 5334 hns3_nic_uninit_irq(priv); 5335 5336 hns3_clear_all_ring(handle, true); 5337 5338 hns3_nic_uninit_vector_data(priv); 5339 5340 hns3_nic_dealloc_vector_data(priv); 5341 5342 hns3_uninit_all_ring(priv); 5343 5344 hns3_put_ring_config(priv); 5345 5346 out_netdev_free: 5347 
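/* reached both from the normal path and from the early exit above:
 * debugfs entries and the netdev itself are always released.
 */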
hns3_dbg_uninit(handle); 5348 free_netdev(netdev); 5349 } 5350 5351 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) 5352 { 5353 struct net_device *netdev = handle->kinfo.netdev; 5354 5355 if (!netdev) 5356 return; 5357 5358 if (linkup) { 5359 netif_tx_wake_all_queues(netdev); 5360 netif_carrier_on(netdev); 5361 if (netif_msg_link(handle)) 5362 netdev_info(netdev, "link up\n"); 5363 } else { 5364 netif_carrier_off(netdev); 5365 netif_tx_stop_all_queues(netdev); 5366 if (netif_msg_link(handle)) 5367 netdev_info(netdev, "link down\n"); 5368 } 5369 } 5370 5371 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) 5372 { 5373 while (ring->next_to_clean != ring->next_to_use) { 5374 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; 5375 hns3_free_buffer_detach(ring, ring->next_to_clean, 0); 5376 ring_ptr_move_fw(ring, next_to_clean); 5377 } 5378 5379 ring->pending_buf = 0; 5380 } 5381 5382 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) 5383 { 5384 struct hns3_desc_cb res_cbs; 5385 int ret; 5386 5387 while (ring->next_to_use != ring->next_to_clean) { 5388 /* When a buffer is not reused, it's memory has been 5389 * freed in hns3_handle_rx_bd or will be freed by 5390 * stack, so we need to replace the buffer here. 5391 */ 5392 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { 5393 ret = hns3_alloc_and_map_buffer(ring, &res_cbs); 5394 if (ret) { 5395 hns3_ring_stats_update(ring, sw_err_cnt); 5396 /* if alloc new buffer fail, exit directly 5397 * and reclear in up flow. 5398 */ 5399 netdev_warn(ring_to_netdev(ring), 5400 "reserve buffer map failed, ret = %d\n", 5401 ret); 5402 return ret; 5403 } 5404 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); 5405 } 5406 ring_ptr_move_fw(ring, next_to_use); 5407 } 5408 5409 /* Free the pending skb in rx ring */ 5410 if (ring->skb) { 5411 dev_kfree_skb_any(ring->skb); 5412 ring->skb = NULL; 5413 ring->pending_buf = 0; 5414 } 5415 5416 return 0; 5417 } 5418 5419 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring) 5420 { 5421 while (ring->next_to_use != ring->next_to_clean) { 5422 /* When a buffer is not reused, it's memory has been 5423 * freed in hns3_handle_rx_bd or will be freed by 5424 * stack, so only need to unmap the buffer here. 5425 */ 5426 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { 5427 hns3_unmap_buffer(ring, 5428 &ring->desc_cb[ring->next_to_use]); 5429 ring->desc_cb[ring->next_to_use].dma = 0; 5430 } 5431 5432 ring_ptr_move_fw(ring, next_to_use); 5433 } 5434 } 5435 5436 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force) 5437 { 5438 struct net_device *ndev = h->kinfo.netdev; 5439 struct hns3_nic_priv *priv = netdev_priv(ndev); 5440 u32 i; 5441 5442 for (i = 0; i < h->kinfo.num_tqps; i++) { 5443 struct hns3_enet_ring *ring; 5444 5445 ring = &priv->ring[i]; 5446 hns3_clear_tx_ring(ring); 5447 5448 ring = &priv->ring[i + h->kinfo.num_tqps]; 5449 /* Continue to clear other rings even if clearing some 5450 * rings failed. 
5451 */ 5452 if (force) 5453 hns3_force_clear_rx_ring(ring); 5454 else 5455 hns3_clear_rx_ring(ring); 5456 } 5457 } 5458 5459 int hns3_nic_reset_all_ring(struct hnae3_handle *h) 5460 { 5461 struct net_device *ndev = h->kinfo.netdev; 5462 struct hns3_nic_priv *priv = netdev_priv(ndev); 5463 struct hns3_enet_ring *rx_ring; 5464 int i, j; 5465 int ret; 5466 5467 ret = h->ae_algo->ops->reset_queue(h); 5468 if (ret) 5469 return ret; 5470 5471 for (i = 0; i < h->kinfo.num_tqps; i++) { 5472 hns3_init_ring_hw(&priv->ring[i]); 5473 5474 /* We need to clear tx ring here because self test will 5475 * use the ring and will not run down before up 5476 */ 5477 hns3_clear_tx_ring(&priv->ring[i]); 5478 priv->ring[i].next_to_clean = 0; 5479 priv->ring[i].next_to_use = 0; 5480 priv->ring[i].last_to_use = 0; 5481 5482 rx_ring = &priv->ring[i + h->kinfo.num_tqps]; 5483 hns3_init_ring_hw(rx_ring); 5484 ret = hns3_clear_rx_ring(rx_ring); 5485 if (ret) 5486 return ret; 5487 5488 /* We can not know the hardware head and tail when this 5489 * function is called in reset flow, so we reuse all desc. 5490 */ 5491 for (j = 0; j < rx_ring->desc_num; j++) 5492 hns3_reuse_buffer(rx_ring, j); 5493 5494 rx_ring->next_to_clean = 0; 5495 rx_ring->next_to_use = 0; 5496 } 5497 5498 hns3_init_tx_ring_tc(priv); 5499 5500 return 0; 5501 } 5502 5503 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) 5504 { 5505 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 5506 struct net_device *ndev = kinfo->netdev; 5507 struct hns3_nic_priv *priv = netdev_priv(ndev); 5508 5509 if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) 5510 return 0; 5511 5512 if (!netif_running(ndev)) 5513 return 0; 5514 5515 return hns3_nic_net_stop(ndev); 5516 } 5517 5518 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) 5519 { 5520 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 5521 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); 5522 int ret = 0; 5523 5524 if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 5525 netdev_err(kinfo->netdev, "device is not initialized yet\n"); 5526 return -EFAULT; 5527 } 5528 5529 clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); 5530 5531 if (netif_running(kinfo->netdev)) { 5532 ret = hns3_nic_net_open(kinfo->netdev); 5533 if (ret) { 5534 set_bit(HNS3_NIC_STATE_RESETTING, &priv->state); 5535 netdev_err(kinfo->netdev, 5536 "net up fail, ret=%d!\n", ret); 5537 return ret; 5538 } 5539 } 5540 5541 return ret; 5542 } 5543 5544 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) 5545 { 5546 struct net_device *netdev = handle->kinfo.netdev; 5547 struct hns3_nic_priv *priv = netdev_priv(netdev); 5548 int ret; 5549 5550 /* Carrier off reporting is important to ethtool even BEFORE open */ 5551 netif_carrier_off(netdev); 5552 5553 ret = hns3_get_ring_config(priv); 5554 if (ret) 5555 return ret; 5556 5557 ret = hns3_nic_alloc_vector_data(priv); 5558 if (ret) 5559 goto err_put_ring; 5560 5561 ret = hns3_nic_init_vector_data(priv); 5562 if (ret) 5563 goto err_dealloc_vector; 5564 5565 ret = hns3_init_all_ring(priv); 5566 if (ret) 5567 goto err_uninit_vector; 5568 5569 hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode); 5570 5571 /* the device can work without cpu rmap, only aRFS needs it */ 5572 ret = hns3_set_rx_cpu_rmap(netdev); 5573 if (ret) 5574 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); 5575 5576 ret = hns3_nic_init_irq(priv); 5577 if (ret) { 5578 dev_err(priv->dev, "init irq failed! 
ret=%d\n", ret); 5579 hns3_free_rx_cpu_rmap(netdev); 5580 goto err_init_irq_fail; 5581 } 5582 5583 if (!hns3_is_phys_func(handle->pdev)) 5584 hns3_init_mac_addr(netdev); 5585 5586 ret = hns3_client_start(handle); 5587 if (ret) { 5588 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); 5589 goto err_client_start_fail; 5590 } 5591 5592 set_bit(HNS3_NIC_STATE_INITED, &priv->state); 5593 5594 return ret; 5595 5596 err_client_start_fail: 5597 hns3_free_rx_cpu_rmap(netdev); 5598 hns3_nic_uninit_irq(priv); 5599 err_init_irq_fail: 5600 hns3_uninit_all_ring(priv); 5601 err_uninit_vector: 5602 hns3_nic_uninit_vector_data(priv); 5603 err_dealloc_vector: 5604 hns3_nic_dealloc_vector_data(priv); 5605 err_put_ring: 5606 hns3_put_ring_config(priv); 5607 5608 return ret; 5609 } 5610 5611 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) 5612 { 5613 struct net_device *netdev = handle->kinfo.netdev; 5614 struct hns3_nic_priv *priv = netdev_priv(netdev); 5615 5616 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { 5617 netdev_warn(netdev, "already uninitialized\n"); 5618 return 0; 5619 } 5620 5621 hns3_free_rx_cpu_rmap(netdev); 5622 hns3_nic_uninit_irq(priv); 5623 hns3_clear_all_ring(handle, true); 5624 hns3_reset_tx_queue(priv->ae_handle); 5625 5626 hns3_nic_uninit_vector_data(priv); 5627 5628 hns3_nic_dealloc_vector_data(priv); 5629 5630 hns3_uninit_all_ring(priv); 5631 5632 hns3_put_ring_config(priv); 5633 5634 return 0; 5635 } 5636 5637 int hns3_reset_notify(struct hnae3_handle *handle, 5638 enum hnae3_reset_notify_type type) 5639 { 5640 int ret = 0; 5641 5642 switch (type) { 5643 case HNAE3_UP_CLIENT: 5644 ret = hns3_reset_notify_up_enet(handle); 5645 break; 5646 case HNAE3_DOWN_CLIENT: 5647 ret = hns3_reset_notify_down_enet(handle); 5648 break; 5649 case HNAE3_INIT_CLIENT: 5650 ret = hns3_reset_notify_init_enet(handle); 5651 break; 5652 case HNAE3_UNINIT_CLIENT: 5653 ret = hns3_reset_notify_uninit_enet(handle); 5654 break; 5655 default: 5656 break; 5657 } 5658 5659 return ret; 5660 } 5661 5662 static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num, 5663 bool rxfh_configured) 5664 { 5665 int ret; 5666 5667 ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num, 5668 rxfh_configured); 5669 if (ret) { 5670 dev_err(&handle->pdev->dev, 5671 "Change tqp num(%u) fail.\n", new_tqp_num); 5672 return ret; 5673 } 5674 5675 ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT); 5676 if (ret) 5677 return ret; 5678 5679 ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT); 5680 if (ret) 5681 hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT); 5682 5683 return ret; 5684 } 5685 5686 int hns3_set_channels(struct net_device *netdev, 5687 struct ethtool_channels *ch) 5688 { 5689 struct hnae3_handle *h = hns3_get_handle(netdev); 5690 struct hnae3_knic_private_info *kinfo = &h->kinfo; 5691 bool rxfh_configured = netif_is_rxfh_configured(netdev); 5692 u32 new_tqp_num = ch->combined_count; 5693 u16 org_tqp_num; 5694 int ret; 5695 5696 if (hns3_nic_resetting(netdev)) 5697 return -EBUSY; 5698 5699 if (ch->rx_count || ch->tx_count) 5700 return -EINVAL; 5701 5702 if (kinfo->tc_info.mqprio_active) { 5703 dev_err(&netdev->dev, 5704 "it's not allowed to set channels via ethtool when MQPRIO mode is on\n"); 5705 return -EINVAL; 5706 } 5707 5708 if (new_tqp_num > hns3_get_max_available_channels(h) || 5709 new_tqp_num < 1) { 5710 dev_err(&netdev->dev, 5711 "Change tqps fail, the tqp range is from 1 to %u", 5712 hns3_get_max_available_channels(h)); 5713 return -EINVAL; 5714 } 
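/* already running with the requested number of channels, nothing to
 * change
 */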
5715 5716 if (kinfo->rss_size == new_tqp_num) 5717 return 0; 5718 5719 netif_dbg(h, drv, netdev, 5720 "set channels: tqp_num=%u, rxfh=%d\n", 5721 new_tqp_num, rxfh_configured); 5722 5723 ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); 5724 if (ret) 5725 return ret; 5726 5727 ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); 5728 if (ret) 5729 return ret; 5730 5731 org_tqp_num = h->kinfo.num_tqps; 5732 ret = hns3_change_channels(h, new_tqp_num, rxfh_configured); 5733 if (ret) { 5734 int ret1; 5735 5736 netdev_warn(netdev, 5737 "Change channels fail, revert to old value\n"); 5738 ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured); 5739 if (ret1) { 5740 netdev_err(netdev, 5741 "revert to old channel fail\n"); 5742 return ret1; 5743 } 5744 5745 return ret; 5746 } 5747 5748 return 0; 5749 } 5750 5751 static const struct hns3_hw_error_info hns3_hw_err[] = { 5752 { .type = HNAE3_PPU_POISON_ERROR, 5753 .msg = "PPU poison" }, 5754 { .type = HNAE3_CMDQ_ECC_ERROR, 5755 .msg = "IMP CMDQ error" }, 5756 { .type = HNAE3_IMP_RD_POISON_ERROR, 5757 .msg = "IMP RD poison" }, 5758 { .type = HNAE3_ROCEE_AXI_RESP_ERROR, 5759 .msg = "ROCEE AXI RESP error" }, 5760 }; 5761 5762 static void hns3_process_hw_error(struct hnae3_handle *handle, 5763 enum hnae3_hw_error_type type) 5764 { 5765 int i; 5766 5767 for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) { 5768 if (hns3_hw_err[i].type == type) { 5769 dev_err(&handle->pdev->dev, "Detected %s!\n", 5770 hns3_hw_err[i].msg); 5771 break; 5772 } 5773 } 5774 } 5775 5776 static const struct hnae3_client_ops client_ops = { 5777 .init_instance = hns3_client_init, 5778 .uninit_instance = hns3_client_uninit, 5779 .link_status_change = hns3_link_status_change, 5780 .reset_notify = hns3_reset_notify, 5781 .process_hw_error = hns3_process_hw_error, 5782 }; 5783 5784 /* hns3_init_module - Driver registration routine 5785 * hns3_init_module is the first routine called when the driver is 5786 * loaded. All it does is register with the PCI subsystem. 5787 */ 5788 static int __init hns3_init_module(void) 5789 { 5790 int ret; 5791 5792 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); 5793 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); 5794 5795 client.type = HNAE3_CLIENT_KNIC; 5796 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s", 5797 hns3_driver_name); 5798 5799 client.ops = &client_ops; 5800 5801 INIT_LIST_HEAD(&client.node); 5802 5803 hns3_dbg_register_debugfs(hns3_driver_name); 5804 5805 ret = hnae3_register_client(&client); 5806 if (ret) 5807 goto err_reg_client; 5808 5809 ret = pci_register_driver(&hns3_driver); 5810 if (ret) 5811 goto err_reg_driver; 5812 5813 return ret; 5814 5815 err_reg_driver: 5816 hnae3_unregister_client(&client); 5817 err_reg_client: 5818 hns3_dbg_unregister_debugfs(); 5819 return ret; 5820 } 5821 module_init(hns3_init_module); 5822 5823 /* hns3_exit_module - Driver exit cleanup routine 5824 * hns3_exit_module is called just before the driver is removed 5825 * from memory. 5826 */ 5827 static void __exit hns3_exit_module(void) 5828 { 5829 pci_unregister_driver(&hns3_driver); 5830 hnae3_unregister_client(&client); 5831 hns3_dbg_unregister_debugfs(); 5832 } 5833 module_exit(hns3_exit_module); 5834 5835 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); 5836 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 5837 MODULE_LICENSE("GPL"); 5838 MODULE_ALIAS("pci:hns-nic"); 5839